repo_name (string, 6 classes) | pr_number (int64, 512 to 78.9k) | pr_title (string, 3 to 144 chars) | pr_description (string, 0 to 30.3k chars) | author (string, 2 to 21 chars) | date_created (timestamp[ns, tz=UTC]) | date_merged (timestamp[ns, tz=UTC]) | previous_commit (string, 40 chars) | pr_commit (string, 40 chars) | query (string, 17 to 30.4k chars) | filepath (string, 9 to 210 chars) | before_content (string, 0 to 112M chars) | after_content (string, 0 to 112M chars) | label (int64, -1 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dotnet/runtime | 66,257 | Cloning improvements | Remove loop cloning variable initialization condition:
Assume that any pre-existing initialization is acceptable.
Check condition against zero if necessary. Const inits remain as before.
Lots of diffs due to more cloning for cases of `for (i = expression...`
where `expression` is not just a constant or local var.
Also, fix various comments that were no longer correct (e.g., "first" block
concept is gone) | BruceForstall | 2022-03-06T02:30:47Z | 2022-03-21T16:36:12Z | a1f26fbc29e645eda585fd3a4093311101521855 | a1bf79e06b64edef0957a9cc907180c25fa8aab9 | Cloning improvements. Remove loop cloning variable initialization condition:
Assume that any pre-existing initialization is acceptable.
Check condition against zero if necessary. Const inits remain as before.
Lots of diffs due to more cloning for cases of `for (i = expression...`
where `expression` is not just a constant or local var.
Also, fix various comments that were no longer correct (e.g., "first" block
concept is gone) | ./src/tests/JIT/Methodical/ELEMENT_TYPE_IU/conv_i8_u_il_d.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="conv_i8_u.il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="conv_i8_u.il" />
</ItemGroup>
</Project>
| -1 |
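The PR described above changes RyuJIT loop cloning so that loops whose induction variable is initialized from an arbitrary expression (not just a constant or a local variable) can now be cloned, with the initial value checked against zero when necessary. Below is a minimal sketch of the loop shape in question, written in C++ purely for illustration; the change itself applies to managed code compiled by the JIT, and `SumUpperHalf` is a made-up example, not taken from the PR's diffs.

```cpp
// Illustrative only: the induction variable 'i' starts at 'length / 2', an expression
// rather than a plain constant or local. Under the old initialization condition such a
// loop was not cloned; after this change it can be, with the cloning conditions checking
// the computed initial value against zero if needed.
int SumUpperHalf(const int* data, int length)
{
    int sum = 0;
    for (int i = length / 2; i < length; i++)
    {
        sum += data[i];
    }
    return sum;
}
```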
dotnet/runtime | 66,257 | Cloning improvements | Remove loop cloning variable initialization condition:
Assume that any pre-existing initialization is acceptable.
Check condition against zero if necessary. Const inits remain as before.
Lots of diffs due to more cloning for cases of `for (i = expression...`
where `expression` is not just a constant or local var.
Also, fix various comments that were no longer correct (e.g., "first" block
concept is gone) | BruceForstall | 2022-03-06T02:30:47Z | 2022-03-21T16:36:12Z | a1f26fbc29e645eda585fd3a4093311101521855 | a1bf79e06b64edef0957a9cc907180c25fa8aab9 | Cloning improvements. Remove loop cloning variable initialization condition:
Assume that any pre-existing initialization is acceptable.
Check condition against zero if necessary. Const inits remain as before.
Lots of diffs due to more cloning for cases of `for (i = expression...`
where `expression` is not just a constant or local var.
Also, fix various comments that were no longer correct (e.g., "first" block
concept is gone) | ./src/coreclr/debug/di/module.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: module.cpp
//
//
//*****************************************************************************
#include "stdafx.h"
#include "winbase.h"
#include "metadataexports.h"
#include "winbase.h"
#include "corpriv.h"
#include "corsym.h"
#include "pedecoder.h"
#include "stgpool.h"
//---------------------------------------------------------------------------------------
// Update an existing metadata importer with a buffer
//
// Arguments:
// pUnk - IUnknown of importer to update.
// pData - local buffer containing new metadata
// cbData - size of buffer in bytes.
// dwReOpenFlags - metadata flags to pass for reopening.
//
// Returns:
// S_OK on success. Else failure.
//
// Notes:
// This will call code:MDReOpenMetaDataWithMemoryEx from the metadata engine.
STDAPI ReOpenMetaDataWithMemoryEx(
void *pUnk,
LPCVOID pData,
ULONG cbData,
DWORD dwReOpenFlags)
{
HRESULT hr = MDReOpenMetaDataWithMemoryEx(pUnk,pData, cbData, dwReOpenFlags);
return hr;
}
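// Illustrative call shape (hypothetical sketch; it mirrors the real use in
// CordbModule::UpdatePublicMetaDataFromRemote further below): after copying a new metadata
// blob into a local buffer, the existing importer is re-pointed at it and takes ownership:
//
//   hr = ReOpenMetaDataWithMemoryEx(pIMImport, pLocalMetaDataCopy, cbMetaData, ofTakeOwnership);
//   IfFailThrow(hr);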
//---------------------------------------------------------------------------------------
// Initialize a new CordbModule around a Module in the target.
//
// Arguments:
// pProcess - process that this module lives in
// vmDomainAssembly - CLR cookie for module.
CordbModule::CordbModule(
CordbProcess * pProcess,
VMPTR_Module vmModule,
VMPTR_DomainAssembly vmDomainAssembly)
: CordbBase(pProcess, vmDomainAssembly.IsNull() ? VmPtrToCookie(vmModule) : VmPtrToCookie(vmDomainAssembly), enumCordbModule),
m_pAssembly(0),
m_pAppDomain(0),
m_classes(11),
m_functions(101),
m_vmDomainAssembly(vmDomainAssembly),
m_vmModule(vmModule),
m_EnCCount(0),
m_fForceMetaDataSerialize(FALSE),
m_nativeCodeTable(101)
{
_ASSERTE(pProcess->GetProcessLock()->HasLock());
_ASSERTE(!vmModule.IsNull());
m_nLoadEventContinueCounter = 0;
#ifdef _DEBUG
m_classes.DebugSetRSLock(pProcess->GetProcessLock());
m_functions.DebugSetRSLock(pProcess->GetProcessLock());
#endif
// Fill out properties via DAC.
ModuleInfo modInfo;
pProcess->GetDAC()->GetModuleData(vmModule, &modInfo); // throws
m_PEBuffer.Init(modInfo.pPEBaseAddress, modInfo.nPESize);
m_fDynamic = modInfo.fIsDynamic;
m_fInMemory = modInfo.fInMemory;
m_vmPEFile = modInfo.vmPEAssembly;
if (!vmDomainAssembly.IsNull())
{
DomainAssemblyInfo dfInfo;
pProcess->GetDAC()->GetDomainAssemblyData(vmDomainAssembly, &dfInfo); // throws
m_pAppDomain = pProcess->LookupOrCreateAppDomain(dfInfo.vmAppDomain);
m_pAssembly = m_pAppDomain->LookupOrCreateAssembly(dfInfo.vmDomainAssembly);
}
else
{
// Not yet implemented
m_pAppDomain = pProcess->GetSharedAppDomain();
m_pAssembly = m_pAppDomain->LookupOrCreateAssembly(modInfo.vmAssembly);
}
#ifdef _DEBUG
m_nativeCodeTable.DebugSetRSLock(GetProcess()->GetProcessLock());
#endif
// MetaData is initialized lazily (via code:CordbModule::GetMetaDataImporter).
// Getting the metadata may be very expensive (especially if we go through the metadata locator, which
// invokes back to the data-target), so don't do it until asked.
// m_pIMImport, m_pInternalMetaDataImport are smart pointers that already initialize to NULL.
}
#ifdef _DEBUG
//---------------------------------------------------------------------------------------
// Callback helper for code:CordbModule::DbgAssertModuleDeleted
//
// Arguments
// vmDomainAssembly - domain file in the enumeration
// pUserData - pointer to the CordbModule that we just got an exit event for.
//
void DbgAssertModuleDeletedCallback(VMPTR_DomainAssembly vmDomainAssembly, void * pUserData)
{
CordbModule * pThis = reinterpret_cast<CordbModule *>(pUserData);
INTERNAL_DAC_CALLBACK(pThis->GetProcess());
if (!pThis->m_vmDomainAssembly.IsNull())
{
VMPTR_DomainAssembly vmDomainAssemblyDeleted = pThis->m_vmDomainAssembly;
CONSISTENCY_CHECK_MSGF((vmDomainAssemblyDeleted != vmDomainAssembly),
("A Module Unload event was sent for a module, but it still shows up in the enumeration.\n vmDomainAssemblyDeleted=%p\n",
VmPtrToCookie(vmDomainAssemblyDeleted)));
}
}
//---------------------------------------------------------------------------------------
// Assert that a module is no longer discoverable via enumeration.
//
// Notes:
// See code:IDacDbiInterface#Enumeration for rules that we're asserting.
// This is a debug only method. It's conceptually similar to
// code:CordbProcess::DbgAssertAppDomainDeleted.
//
void CordbModule::DbgAssertModuleDeleted()
{
GetProcess()->GetDAC()->EnumerateModulesInAssembly(
m_pAssembly->GetDomainAssemblyPtr(),
DbgAssertModuleDeletedCallback,
this);
}
#endif // _DEBUG
CordbModule::~CordbModule()
{
// We should have been explicitly neutered before our internal ref went to 0.
_ASSERTE(IsNeutered());
_ASSERTE(m_pIMImport == NULL);
}
// Neutered by CordbAppDomain
void CordbModule::Neuter()
{
// m_pAppDomain, m_pAssembly assigned w/o AddRef()
m_classes.NeuterAndClear(GetProcess()->GetProcessLock());
m_functions.NeuterAndClear(GetProcess()->GetProcessLock());
m_nativeCodeTable.NeuterAndClear(GetProcess()->GetProcessLock());
m_pClass.Clear();
// This is very important because it also releases the metadata's potential file locks.
m_pInternalMetaDataImport.Clear();
m_pIMImport.Clear();
CordbBase::Neuter();
}
//
// Creates an IStream based off the memory described by the TargetBuffer.
//
// Arguments:
// pProcess - process that buffer is valid in.
// buffer - memory range in target
// ppStream - out parameter to receive the new stream. *ppStream == NULL on input.
// caller owns the new object and must call Release.
//
// Returns:
// Throws on error.
// Common errors include if memory is missing in the target.
//
// Notes:
// This will copy the memory over from the TargetBuffer, and then create a new IStream
// object around it.
//
void GetStreamFromTargetBuffer(CordbProcess * pProcess, TargetBuffer buffer, IStream ** ppStream)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
_ASSERTE(ppStream != NULL);
_ASSERTE(*ppStream == NULL);
int cbSize = buffer.cbSize;
NewArrayHolder<BYTE> localBuffer(new BYTE[cbSize]);
pProcess->SafeReadBuffer(buffer, localBuffer);
HRESULT hr = E_FAIL;
hr = CInMemoryStream::CreateStreamOnMemoryCopy(localBuffer, cbSize, ppStream);
IfFailThrow(hr);
_ASSERTE(*ppStream != NULL);
}
//
// Helper API to get in-memory symbols from the target into a host stream object.
//
// Arguments:
// ppStream - out parameter to receive the new stream. *ppStream == NULL on input.
// caller owns the new object and must call Release.
//
// Returns:
// kSymbolFormatNone if no PDB stream is present. This is a common case for
// file-based modules, and also for dynamic modules that just aren't tracking
// debug information.
// The format of the symbols stored into ppStream. This is common:
// - Ref.Emit modules if the debuggee generated debug symbols,
// - in-memory modules (such as Load(Byte[], Byte[])),
// - hosted modules.
// Throws on error
//
IDacDbiInterface::SymbolFormat CordbModule::GetInMemorySymbolStream(IStream ** ppStream)
{
// @dbgtodo : add a PUBLIC_REENTRANT_API_ENTRY_FOR_SHIM contract
// This function is mainly called internally in dbi, and also by the shim to emulate the
// UpdateModuleSymbols callback on attach.
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
_ASSERTE(ppStream != NULL);
_ASSERTE(*ppStream == NULL);
*ppStream = NULL;
TargetBuffer bufferPdb;
IDacDbiInterface::SymbolFormat symFormat;
GetProcess()->GetDAC()->GetSymbolsBuffer(m_vmModule, &bufferPdb, &symFormat);
if (bufferPdb.IsEmpty())
{
// No in-memory PDB. Common case.
_ASSERTE(symFormat == IDacDbiInterface::kSymbolFormatNone);
return IDacDbiInterface::kSymbolFormatNone;
}
else
{
_ASSERTE(symFormat != IDacDbiInterface::kSymbolFormatNone);
GetStreamFromTargetBuffer(GetProcess(), bufferPdb, ppStream);
return symFormat;
}
}
//---------------------------------------------------------------------------------------
// Accessor for PE file.
//
// Returns:
// VMPTR_PEAssembly for this module. Should always be non-null
//
// Notes:
// A main usage of this is to find the proper internal MetaData importer.
// DACized code needs to map from PEAssembly --> IMDInternalImport.
//
VMPTR_PEAssembly CordbModule::GetPEFile()
{
return m_vmPEFile;
}
//---------------------------------------------------------------------------------------
//
// Top-level getter for the public metadata importer for this module
//
// Returns:
// metadata importer.
// Never returns NULL. Will throw some hr (likely CORDBG_E_MISSING_METADATA) instead.
//
// Notes:
// This will lazily create the metadata, possibly invoking back into the data-target.
IMetaDataImport * CordbModule::GetMetaDataImporter()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
// If we already have it, then we're done.
// This is critical to do at the top of this function to avoid potential recursion.
if (m_pIMImport != NULL)
{
return m_pIMImport;
}
// Lazily initialize
// Fetch metadata from target
LOG((LF_CORDB,LL_INFO1000, "CM::GMI Lazy init refreshing metadata\n"));
ALLOW_DATATARGET_MISSING_MEMORY(
RefreshMetaData();
);
// If lookup failed from the Module & target memory, try the metadata locator interface
// from debugger, if we have one.
if (m_pIMImport == NULL)
{
bool isILMetaDataForNGENImage; // Not currently used for anything.
// The process's LookupMetaData will ping the debugger's ICorDebugMetaDataLocator iface.
CordbProcess * pProcess = GetProcess();
RSLockHolder processLockHolder(pProcess->GetProcessLock());
m_pInternalMetaDataImport.Clear();
// Do not call code:CordbProcess::LookupMetaData from this function. It will try to load
// through the CordbModule again which will end up back here, and on failure you'll fill the stack.
// Since we've already done everything possible from the Module anyhow, just call the
// stuff that talks to the debugger.
// Don't do anything with the ptr returned here, since it's really m_pInternalMetaDataImport.
pProcess->LookupMetaDataFromDebugger(m_vmPEFile, isILMetaDataForNGENImage, this);
}
// If we still can't get it, throw.
if (m_pIMImport == NULL)
{
ThrowHR(CORDBG_E_MISSING_METADATA);
}
return m_pIMImport;
}
// Refresh the metadata cache if a profiler added new rows.
//
// Arguments:
// token - token that we want to ensure is in the metadata cache.
//
// Notes:
// In the profiler case, the token may refer to newly added rows, and we may need to update the metadata.
// This only supports StandAloneSigs.
//
void CordbModule::UpdateMetaDataCacheIfNeeded(mdToken token)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN token=0x%x\n", token));
// If we aren't trying to keep parity with our legacy profiler metadata update behavior
// then we should avoid this temporary update mechanism entirely
if(GetProcess()->GetWriteableMetadataUpdateMode() != LegacyCompatPolicy)
{
return;
}
//
// 1) Check if in-range? Compare against tables, etc.
//
if(CheckIfTokenInMetaData(token))
{
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN token was present\n"));
return;
}
//
// 2) Copy over new MetaData. From now on we assume that the profiler is
// modifying module metadata and that we need to serialize in process
// at each refresh
//
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN token was not present, refreshing\n"));
m_fForceMetaDataSerialize = TRUE;
RefreshMetaData();
// If we are dump debugging, we may still not have it. Nothing to be done.
}
// Returns TRUE if the token is present, FALSE if not.
BOOL CordbModule::CheckIfTokenInMetaData(mdToken token)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO10000, "CM::CITIM token=0x%x\n", token));
_ASSERTE(TypeFromToken(token) == mdtSignature);
RSExtSmartPtr<IMetaDataTables> pTable;
HRESULT hr = GetMetaDataImporter()->QueryInterface(IID_IMetaDataTables, (void**) &pTable);
_ASSERTE(SUCCEEDED(hr));
if (FAILED(hr))
{
ThrowHR(hr);
}
ULONG cbRowsAvailable; // number of rows in the table
hr = pTable->GetTableInfo(
mdtSignature >> 24, // [IN] Which table.
NULL, // [OUT] Size of a row, bytes.
&cbRowsAvailable, // [OUT] Number of rows.
NULL, // [OUT] Number of columns in each row.
NULL, // [OUT] Key column, or -1 if none.
NULL); // [OUT] Name of the table.
_ASSERTE(SUCCEEDED(hr));
if (FAILED(hr))
{
ThrowHR(hr);
}
// Rows start counting with number 1.
ULONG rowRequested = RidFromToken(token);
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN requested=0x%x available=0x%x\n", rowRequested, cbRowsAvailable));
return (rowRequested <= cbRowsAvailable);
}
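// Worked example (hypothetical numbers): a standalone-signature token such as 0x11000005 has
// type mdtSignature (0x11 in the top byte) and RID 5, which is what RidFromToken extracts.
// If GetTableInfo reports only 3 rows in the Signature table, then 5 > 3 and this function
// returns FALSE, causing UpdateMetaDataCacheIfNeeded to refresh the metadata.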
// This helper class ensures the remote serialized buffer gets deleted in the RefreshMetaData
// function below
class CleanupRemoteBuffer
{
public:
CordbProcess* pProcess;
CordbModule* pModule;
TargetBuffer bufferMetaData;
BOOL fDoCleanup;
CleanupRemoteBuffer() :
fDoCleanup(FALSE) { }
~CleanupRemoteBuffer()
{
if(fDoCleanup)
{
//
// Send 2nd event to free buffer.
//
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event,
DB_IPCE_RESOLVE_UPDATE_METADATA_2,
true,
pModule->GetAppDomain()->GetADToken());
event.MetadataUpdateRequest.pMetadataStart = CORDB_ADDRESS_TO_PTR(bufferMetaData.pAddress);
// Note: two-way event here...
IfFailThrow(pProcess->SendIPCEvent(&event, sizeof(DebuggerIPCEvent)));
_ASSERTE(event.type == DB_IPCE_RESOLVE_UPDATE_METADATA_2_RESULT);
}
}
};
// Called to refetch metadata. This occurs when a dynamic module grows or the profiler
// has edited the metadata
void CordbModule::RefreshMetaData()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO1000, "CM::RM\n"));
// There are several different ways we can get the metadata
// 1) [Most common] Module is loaded into VM and never changed. The importer
// will be constructed referring to the file on disk. This is a significant
// working set win because the VM and debugger share the image. If there is
// an error reading from the file we can fall back to case #2 for these modules
// 2) Most modules have a buffer in target memory that represents their
// metadata. We copy that data over the RS and construct an in-memory
// importer on top of it.
// 3) The only modules that don't have a suitable buffer (case #2) are those
// modified in memory via the profiling API (or ENC). A message can be sent from
// the debugger to the debuggee instructing it to allocate a buffer and
// serialize the metadata into it. Then we copy that data to the RS and
// construct an in-memory importer on top of it.
// We don't need to send this message in the ENC case because the debugger
// has the same changes applied as the debuggee.
// 4) Case #3 won't work when dump debugging because we can't send IPC events.
// Instead we can locate chunks of the metadata pointed to in the implementation
// details of a remote MDInternalRW object, marshal that memory over to the
// debugger process, and then put a metadata reader on top of it.
// In time this DAC'ized metadata could be used in almost any scenario,
// although it's probably worth keeping the file mapping technique in case
// #1 around for its performance wins.
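// In terms of the checks below: case #4 is attempted only when there is no shim (our proxy
// for dump debugging), the writeable-metadata update mode is AlwaysShowUpdates, and the module
// is not dynamic; cases #1/#2 correspond to !m_fForceMetaDataSerialize; case #3 requires
// m_fForceMetaDataSerialize on a live (shimmed) process and is skipped for dumps.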
CordbProcess * pProcess = GetProcess();
TargetBuffer bufferMetaData;
CleanupRemoteBuffer cleanup; // this local has a destructor to do some finally work
// check for scenarios we might want to handle with case #4
if (GetProcess()->GetShim() == NULL &&
GetProcess()->GetWriteableMetadataUpdateMode() == AlwaysShowUpdates &&
!m_fDynamic)
{
//None of the above requirements are particularly hard to change in the future as needed...
// a) dump-debugging mode - If we do this on a process that can move forward we need a mechanism to determine
// when to refetch the metadata.
// b) AlwaysShowUpdates - this is purely a risk mitigation choice, there aren't any known back-compat issues
// using DAC'ized metadata. If you want back-compat with the in-proc debugging behavior
// you need to figure out how to ReOpen the same public MD interface with new data.
// c) !m_fDynamic - A risk mitigation choice. Initial testing suggests it would work fine.
// So far we've only got a reader for in-memory-writable metadata (MDInternalRW implementation)
// We could make a reader for MDInternalRO, but no need yet. This also ensures we don't encroach into common
// scenario where we can map a file on disk.
TADDR remoteMDInternalRWAddr = NULL;
GetProcess()->GetDAC()->GetPEFileMDInternalRW(m_vmPEFile, &remoteMDInternalRWAddr);
if (remoteMDInternalRWAddr != NULL)
{
// we should only be doing this once to initialize, we don't support reopen with this technique
_ASSERTE(m_pIMImport == NULL);
ULONG32 mdStructuresVersion;
HRESULT hr = GetProcess()->GetDAC()->GetMDStructuresVersion(&mdStructuresVersion);
IfFailThrow(hr);
ULONG32 mdStructuresDefines;
hr = GetProcess()->GetDAC()->GetDefinesBitField(&mdStructuresDefines);
IfFailThrow(hr);
IMetaDataDispenserCustom* pDispCustom = NULL;
hr = GetProcess()->GetDispenser()->QueryInterface(IID_IMetaDataDispenserCustom, (void**)&pDispCustom);
IfFailThrow(hr);
IMDCustomDataSource* pDataSource = NULL;
hr = CreateRemoteMDInternalRWSource(remoteMDInternalRWAddr, GetProcess()->GetDataTarget(), mdStructuresDefines, mdStructuresVersion, &pDataSource);
IfFailThrow(hr);
IMetaDataImport* pImport = NULL;
hr = pDispCustom->OpenScopeOnCustomDataSource(pDataSource, 0, IID_IMetaDataImport, (IUnknown**)&m_pIMImport);
IfFailThrow(hr);
UpdateInternalMetaData();
return;
}
}
if(!m_fForceMetaDataSerialize) // case 1 and 2
{
LOG((LF_CORDB,LL_INFO10000, "CM::RM !m_fForceMetaDataSerialize case\n"));
GetProcess()->GetDAC()->GetMetadata(m_vmModule, &bufferMetaData); // throws
}
else if (GetProcess()->GetShim() == NULL) // case 3 won't work on a dump so don't try
{
return;
}
else // case 3 on a live process
{
LOG((LF_CORDB,LL_INFO10000, "CM::RM m_fForceMetaDataSerialize case\n"));
//
// Send 1 event to get metadata. This allocates a buffer
//
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event,
DB_IPCE_RESOLVE_UPDATE_METADATA_1,
true,
GetAppDomain()->GetADToken());
event.MetadataUpdateRequest.vmModule = m_vmModule;
// Note: two-way event here...
IfFailThrow(pProcess->SendIPCEvent(&event, sizeof(DebuggerIPCEvent)));
_ASSERTE(event.type == DB_IPCE_RESOLVE_UPDATE_METADATA_1_RESULT);
//
// Update it on the RS
//
bufferMetaData.Init(PTR_TO_CORDB_ADDRESS(event.MetadataUpdateRequest.pMetadataStart), (ULONG) event.MetadataUpdateRequest.nMetadataSize);
// init the cleanup object to ensure the buffer gets destroyed later
cleanup.bufferMetaData = bufferMetaData;
cleanup.pProcess = pProcess;
cleanup.pModule = this;
cleanup.fDoCleanup = TRUE;
}
InitMetaData(bufferMetaData, IsFileMetaDataValid()); // throws
}
// Determines whether the on-disk metadata for this module is usable as the
// current metadata
BOOL CordbModule::IsFileMetaDataValid()
{
bool fOpenFromFile = true;
// Dynamic, In-memory, modules must be OpenScopeOnMemory.
// For modules that require the metadata to be serialized in memory, we must also OpenScopeOnMemory
// For EnC, we can use OpenScope(onFile) and it will get converted to Memory when we get an emitter.
// We're called before the ModuleLoad callback, so EnC status hasn't been set yet and
// EnC will be false.
if (m_fDynamic || m_fInMemory || m_fForceMetaDataSerialize)
{
LOG((LF_CORDB,LL_INFO10000, "CM::IFMV: m_fDynamic=0x%x m_fInMemory=0x%x m_fForceMetaDataSerialize=0x%x\n",
m_fDynamic, m_fInMemory, m_fForceMetaDataSerialize));
fOpenFromFile = false;
}
#ifdef _DEBUG
// Reg key override to force us to use Open-by-memory. This can let us run perf tests to
// compare the Open-by-mem vs. Open-by-file.
static DWORD openFromFile = 99;
if (openFromFile == 99)
openFromFile = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgNoOpenMDByFile);
if (openFromFile)
{
LOG((LF_CORDB,LL_INFO10000, "CM::IFMV: INTERNAL_DbgNoOpenMDByFile is set\n"));
fOpenFromFile = false;
}
#endif
LOG((LF_CORDB,LL_INFO10000, "CM::IFMV: returns 0x%x\n", fOpenFromFile));
return fOpenFromFile;
}
//---------------------------------------------------------------------------------------
// Accessor for Internal MetaData importer. This is lazily initialized.
//
// Returns:
// Internal MetaDataImporter, which can be handed off to DAC. Not AddRef().
// Should be non-null. Throws on error.
//
// Notes:
// An internal metadata importer is used extensively by DAC-ized code (And Edit-and-continue).
// This should not be handed out through ICorDebug.
IMDInternalImport * CordbModule::GetInternalMD()
{
if (m_pInternalMetaDataImport == NULL)
{
UpdateInternalMetaData(); // throws
}
return m_pInternalMetaDataImport;
}
//---------------------------------------------------------------------------------------
// The one-stop top-level initialization function the metadata (both public and private) for this module.
//
// Arguments:
// buffer - valid buffer into target containing the metadata.
// useFileMappingOptimization - if true this allows us to attempt just opening the importer
// by using the metadata in the module on disk. if false or
// if the attempt fails we open the metadata import on memory in
// target buffer
//
// Notes:
// This will initialize both the internal and public metadata from the buffer in the target.
// Only called as a helper from RefreshMetaData()
//
// This may throw (eg, target buffer is missing).
//
void CordbModule::InitMetaData(TargetBuffer buffer, BOOL allowFileMappingOptimization)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO100000, "CM::IM: initing with remote buffer 0x%p length 0x%x\n",
CORDB_ADDRESS_TO_PTR(buffer.pAddress), buffer.cbSize));
// clear all the metadata
m_pInternalMetaDataImport.Clear();
if (m_pIMImport == NULL)
{
// The optimization we're going for here is that the OS will use the same physical memory to
// back multiple ReadOnly opens of the same file. Thus since we expect the target process in
// live debugging, or the debugger in dump debugging, has already opened the file we would
// like to not create a local buffer and spend time copying in metadata from the target when
// the OS will happily do address lookup magic against the same physical memory for everyone.
// Try getting the data from the file if allowed, and fall back to using the buffer
// if required
HRESULT hr = S_OK;
if (allowFileMappingOptimization)
{
hr = InitPublicMetaDataFromFile();
if(FAILED(hr))
{
LOG((LF_CORDB,LL_INFO1000000, "CM::IPM: File mapping failed with hr=0x%x\n", hr));
}
}
if(!allowFileMappingOptimization || FAILED(hr))
{
// This is where the expensive copy of all metadata content from target memory
// that we would like to try and avoid happens.
InitPublicMetaData(buffer);
}
}
else
{
// We've already handed out an Import object, and so we can't create a new pointer instance.
// Instead, we update the existing instance with new data.
UpdatePublicMetaDataFromRemote(buffer);
}
// if we haven't set it by this point UpdateInternalMetaData below is going to get us
// in an infinite loop of refreshing public metadata
_ASSERTE(m_pIMImport != NULL);
// Now that public metadata has changed, force internal metadata to update too.
// Public and internal metadata expose different access interfaces to the same underlying storage.
UpdateInternalMetaData();
}
//---------------------------------------------------------------------------------------
// Updates the Internal MetaData object from the public importer. Lazily fetch public importer if needed.
//
// Assumptions:
// Caller has cleared Internal metadata before even updating public metadata.
// This way, if the caller fails halfway through updating the public metadata, we don't have
// stale internal MetaData.
void CordbModule::UpdateInternalMetaData()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
// Caller should have already cleared it.
_ASSERTE(m_pInternalMetaDataImport == NULL);
// Get the importer. If it's currently null, this will go fetch it.
IMetaDataImport * pImport = GetMetaDataImporter(); // throws
// If both the public and the private interfaces are NULL on entry to this function, the call above will
// recursively call this function. This can happen if the caller calls GetInternalMD() directly
// instead of InitMetaData(). In this case, the above function call will have initialized the internal
// interface as well, so we need to check for it here.
if (m_pInternalMetaDataImport == NULL)
{
HRESULT hr = GetMDInternalInterfaceFromPublic(
pImport,
IID_IMDInternalImport,
reinterpret_cast<void**> (&m_pInternalMetaDataImport));
if (m_pInternalMetaDataImport == NULL)
{
ThrowHR(hr);
}
}
_ASSERTE(m_pInternalMetaDataImport != NULL);
}
// Initialize the public metadata.
//
// The debuggee already has a copy of the metadata in its process.
// If we OpenScope on file as read-only, the OS file-system will share our metadata with the
// copy in the debuggee. This can be a major perf win. FX metadata can be over 8 MB+.
// OpenScopeOnMemory can't be shared b/c we allocate a buffer.
HRESULT CordbModule::InitPublicMetaDataFromFile()
{
INTERNAL_API_ENTRY(this->GetProcess());
// @dbgtodo metadata - In v3, we can't assume we have the same path namespace as the target (i.e. it could be
// a dump or remote), so we can't just try and open the file. Instead we have to rely on interfaces
// on the datatarget to map the metadata here. Note that this must also work for minidumps where the
// metadata isn't necessarily in the dump image.
// Get filename. There are 2 filenames to choose from:
// - ngen (if applicable).
// - non-ngen (aka "normal").
// By loading metadata out of the same OS file as loaded into the debuggee space, the OS can share those pages.
const WCHAR * szFullPathName = NULL;
bool fDebuggerLoadingNgen = false;
bool fDebuggeeLoadedNgen = false;
szFullPathName = GetNGenImagePath();
if(szFullPathName != NULL)
{
fDebuggeeLoadedNgen = true;
fDebuggerLoadingNgen = true;
#ifndef TARGET_UNIX
// NGEN images are large and we shouldn't load them if they won't be shared; therefore fail the NGEN mapping and
// fall back to the IL image if the debugger doesn't have the image loaded already.
// It's possible that the debugger would still load the NGEN image sometime in the future and we will miss a sharing
// opportunity. It's an acceptable loss from an imperfect heuristic.
if (NULL == WszGetModuleHandle(szFullPathName))
#endif
{
szFullPathName = NULL;
fDebuggerLoadingNgen = false;
}
}
// If we don't have or decided not to load the NGEN image, check to see if IL image is available
if (!fDebuggerLoadingNgen)
{
szFullPathName = GetModulePath();
}
// If we are doing live debugging we shouldn't use metadata from an IL image because it doesn't match closely enough.
// In particular the RVAs for IL code headers are different between the two images which will cause all IL code and
// local var signature lookups to fail. With further work we could compensate for the RVAs by computing
// the image layout differences and adjusting the returned RVAs, but there may be other differences that need to be accounted
// for as well. If we did go that route we should do a binary diff across a variety of NGEN/IL image metadata blobs to
// get a concrete understanding of the format differences.
//
// This check should really be 'Are we OK with only getting the functionality level of mini-dump debugging?' but since we
// don't know the debugger's intent we guess whether or not we are doing dump debugging by checking if we are shimmed. Once
// the shim supports live debugging we should probably just stop automatically falling back to IL image and let the debugger
// decide via the ICorDebugMetadataLocator interface.
if(fDebuggeeLoadedNgen && !fDebuggerLoadingNgen && GetProcess()->GetShim()!=NULL)
{
// The IL image might be there, but we shouldn't use it for live debugging
return CORDBG_E_MISSING_METADATA;
}
// @dbgtodo metadata - This is really a CreateFile() call which we can't do. We must offload this to
// the data target for the dump-debugging scenarios.
//
// We're opening it as "read". If we QI for an IEmit interface (which we need for EnC),
// then the metadata engine will convert it to a "write" underneath us.
// We want "read" so that we can let the OS share the pages.
DWORD dwOpenFlags = 0;
// If we know we're never going to need to write (i.e. never do EnC), then we should indicate
// that to metadata by telling it this interface will always be read-only. By passing read-only,
// the metadata library will then also share the VM space for the image when the same image is
// opened multiple times for multiple AppDomains.
// We don't currently have a way to tell absolutely whether this module will support EnC, but we
// know that NGen modules NEVER support EnC, and NGen is the common case that eats up a lot of VM.
// So we'll use the heuristic of opening the metadata for all ngen images as read-only. Ideally
// we'd go even further here (perhaps even changing metadata to map only the region of the file it
// needs).
if (fDebuggerLoadingNgen)
{
dwOpenFlags = ofReadOnly | ofTrustedImage;
}
// This is the only place we ever validate that the file matches, because we're potentially
// loading the file from disk ourselves. We're doing this without giving the debugger a chance
// to do anything. We should never load a file that isn't an exact match.
return InitPublicMetaDataFromFile(szFullPathName, dwOpenFlags, true);
}
// We should only ever validate we have the correct file if it's a file we found ourselves.
// We allow the debugger to choose their own policy with regard to using metadata from the IL image
// when debugging an NI, or even intentionally using mismatched metadata if they like.
HRESULT CordbModule::InitPublicMetaDataFromFile(const WCHAR * pszFullPathName,
DWORD dwOpenFlags,
bool validateFileInfo)
{
#ifdef HOST_UNIX
// UNIXTODO: Some intricate details of file mapping don't work on Linux as on Windows.
// We have to revisit this and try to fix it for POSIX systems.
return E_FAIL;
#else
if (validateFileInfo)
{
// Check that we've got the right file to target.
// There's nothing to prevent some other file being copied in for live, and with
// dump debugging there's nothing to say that we're not on another machine where a different
// file is at the same path.
// If we can't validate we have a hold of the correct file, we should not open it.
// We will fall back on asking the debugger to get us the correct file, or copying
// target memory back to the debugger.
DWORD dwImageTimeStamp = 0;
DWORD dwImageSize = 0;
bool isNGEN = false; // unused
StringCopyHolder filePath;
_ASSERTE(!m_vmPEFile.IsNull());
// MetaData lookup favors the NGEN image, which is what we want here.
if (!this->GetProcess()->GetDAC()->GetMetaDataFileInfoFromPEFile(m_vmPEFile,
dwImageTimeStamp,
dwImageSize,
isNGEN,
&filePath))
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't get metadata info for file \"%s\"\n", pszFullPathName));
return CORDBG_E_MISSING_METADATA;
}
// If the timestamp and size don't match, then this is the wrong file!
// Map the file and check them.
HandleHolder hMDFile = WszCreateFile(pszFullPathName,
GENERIC_READ,
FILE_SHARE_READ,
NULL, // default security descriptor
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (hMDFile == INVALID_HANDLE_VALUE)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't open file \"%s\" (GLE=%x)\n", pszFullPathName, GetLastError()));
return CORDBG_E_MISSING_METADATA;
}
DWORD dwFileHigh = 0;
DWORD dwFileLow = GetFileSize(hMDFile, &dwFileHigh);
if (dwFileLow == INVALID_FILE_SIZE)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: File \"%s\" had invalid size.\n", pszFullPathName));
return CORDBG_E_MISSING_METADATA;
}
_ASSERTE(dwFileHigh == 0);
HandleHolder hMap = WszCreateFileMapping(hMDFile, NULL, PAGE_READONLY, dwFileHigh, dwFileLow, NULL);
if (hMap == NULL)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't create mapping of file \"%s\" (GLE=%x)\n", pszFullPathName, GetLastError()));
return CORDBG_E_MISSING_METADATA;
}
MapViewHolder hMapView = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);
if (hMapView == NULL)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't map view of file \"%s\" (GLE=%x)\n", pszFullPathName, GetLastError()));
return CORDBG_E_MISSING_METADATA;
}
// Mapped as flat file, have PEDecoder go find what we want.
PEDecoder pedecoder(hMapView, (COUNT_T)dwFileLow);
if (!pedecoder.HasNTHeaders())
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: \"%s\" did not have PE headers!\n", pszFullPathName));
return CORDBG_E_MISSING_METADATA;
}
if ((dwImageSize != pedecoder.GetVirtualSize()) ||
(dwImageTimeStamp != pedecoder.GetTimeDateStamp()))
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Validation of \"%s\" failed. "
"Expected size=%x, Expected timestamp=%x, Actual size=%x, Actual timestamp=%x\n",
pszFullPathName,
pedecoder.GetVirtualSize(),
pedecoder.GetTimeDateStamp(),
dwImageSize,
dwImageTimeStamp));
return CORDBG_E_MISSING_METADATA;
}
// All checks passed, go ahead and load this file for real.
}
// Get metadata Dispenser.
IMetaDataDispenserEx * pDisp = GetProcess()->GetDispenser();
HRESULT hr = pDisp->OpenScope(pszFullPathName, dwOpenFlags, IID_IMetaDataImport, (IUnknown**)&m_pIMImport);
_ASSERTE(SUCCEEDED(hr) == (m_pIMImport != NULL));
if (FAILED(hr))
{
// This should never happen in normal scenarios. It could happen if someone has renamed
// the assembly after it was opened by the debuggee process, but this should be rare enough
// that we don't mind taking the perf. hit and loading from memory.
// @dbgtodo metadata - would this happen in the shadow-copy scenario?
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't open metadata in file \"%s\" (hr=%x)\n", pszFullPathName, hr));
}
return hr;
#endif // HOST_UNIX
}
//---------------------------------------------------------------------------------------
// Initialize the public metadata.
//
// Arguments:
// buffer - valid buffer into target containing the metadata.
//
// Assumptions:
// This is an internal function which should only be called once to initialize the
// metadata. Future attempts to re-initialize (in dynamic cases) should call code:CordbModule::UpdatePublicMetaDataFromRemote
// After the public metadata is initialized, initialize private metadata via code:CordbModule::UpdateInternalMetaData
//
void CordbModule::InitPublicMetaData(TargetBuffer buffer)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
INTERNAL_API_ENTRY(this->GetProcess());
LOG((LF_CORDB,LL_INFO100000, "CM::IPM: initing with remote buffer 0x%p length 0x%x\n",
CORDB_ADDRESS_TO_PTR(buffer.pAddress), buffer.cbSize));
ULONG nMetaDataSize = buffer.cbSize;
if (nMetaDataSize == 0)
{
// We should always have metadata, and if we don't, we want to know.
// @dbgtodo metadata - we know metadata from dynamic modules doesn't work in V3
// (non-shim) cases yet.
// But our caller should already have handled that case.
SIMPLIFYING_ASSUMPTION(!"Error: missing the metadata");
return;
}
HRESULT hr = S_OK;
// Get metadata Dispenser.
IMetaDataDispenserEx * pDisp = GetProcess()->GetDispenser();
// copy it over from the remote process
CoTaskMemHolder<VOID> pMetaDataCopy;
CopyRemoteMetaData(buffer, pMetaDataCopy.GetAddr());
//
// Setup our metadata import object, m_pIMImport
//
// Save the old mode for restoration
VARIANT valueOld;
hr = pDisp->GetOption(MetaDataSetUpdate, &valueOld);
SIMPLIFYING_ASSUMPTION(!FAILED(hr));
// Set R/W mode so that we can update the metadata when
// we do EnC operations.
VARIANT valueRW;
V_VT(&valueRW) = VT_UI4;
V_I4(&valueRW) = MDUpdateFull;
hr = pDisp->SetOption(MetaDataSetUpdate, &valueRW);
SIMPLIFYING_ASSUMPTION(!FAILED(hr));
hr = pDisp->OpenScopeOnMemory(pMetaDataCopy,
nMetaDataSize,
ofTakeOwnership,
IID_IMetaDataImport,
reinterpret_cast<IUnknown**>( &m_pIMImport ));
// MetaData has taken ownership -don't free the memory
pMetaDataCopy.SuppressRelease();
// Immediately restore the old setting.
HRESULT hrRestore = pDisp->SetOption(MetaDataSetUpdate, &valueOld);
SIMPLIFYING_ASSUMPTION(!FAILED(hrRestore));
// Throw on errors.
IfFailThrow(hr);
IfFailThrow(hrRestore);
// Done!
}
//---------------------------------------------------------------------------------------
// Update public MetaData by copying it from the target and updating our IMetaDataImport object.
//
// Arguments:
// buffer - buffer into target space containing metadata blob
//
// Notes:
// Useful for additional class-loads into a dynamic module. A new class means new metadata
// and so we need to update the RS metadata to stay in sync with the left-side.
//
// This will call code:CordbModule::CopyRemoteMetaData to copy the remote buffer locally, and then
// it can OpenScopeOnMemory().
//
void CordbModule::UpdatePublicMetaDataFromRemote(TargetBuffer bufferRemoteMetaData)
{
CONTRACTL
{
// @dbgtodo metadata - think about the error semantics here. These fails during dispatching an event; so
// address this during event pipeline.
THROWS;
}
CONTRACTL_END;
if (bufferRemoteMetaData.IsEmpty())
{
ThrowHR(E_INVALIDARG);
}
INTERNAL_API_ENTRY(this->GetProcess()); //
LOG((LF_CORDB,LL_INFO100000, "CM::UPMFR: updating with remote buffer 0x%p length 0x%x\n",
CORDB_ADDRESS_TO_PTR(bufferRemoteMetaData.pAddress), bufferRemoteMetaData.cbSize));
// We're re-initializing existing metadata.
_ASSERTE(m_pIMImport != NULL);
HRESULT hr = S_OK;
ULONG dwMetaDataSize = bufferRemoteMetaData.cbSize;
// First copy it from the remote process
CoTaskMemHolder<VOID> pLocalMetaDataPtr;
CopyRemoteMetaData(bufferRemoteMetaData, pLocalMetaDataPtr.GetAddr());
IMetaDataDispenserEx * pDisp = GetProcess()->GetDispenser();
_ASSERTE(pDisp != NULL); // throws on error.
LOG((LF_CORDB,LL_INFO100000, "CM::RI: converting to new metadata\n"));
// now verify that the metadata is valid by opening a temporary scope on the memory
{
ReleaseHolder<IMetaDataImport> pIMImport;
hr = pDisp->OpenScopeOnMemory(pLocalMetaDataPtr,
dwMetaDataSize,
0,
IID_IMetaDataImport,
(IUnknown**)&pIMImport);
IfFailThrow(hr);
}
// We reopen on an existing instance, not create a new instance.
_ASSERTE(m_pIMImport != NULL); //
// Now tell our current IMetaDataImport object to re-initialize by swapping in the new memory block.
// This allows us to keep manipulating metadata objects on other threads without crashing.
// This will also invalidate an existing associated Internal MetaData.
hr = ReOpenMetaDataWithMemoryEx(m_pIMImport, pLocalMetaDataPtr, dwMetaDataSize, ofTakeOwnership );
IfFailThrow(hr);
// Success. MetaData now owns the metadata memory
pLocalMetaDataPtr.SuppressRelease();
}
//---------------------------------------------------------------------------------------
// Copy metadata memory from the remote process into a newly allocated local buffer.
//
// Arguments:
// pRemoteMetaDataPtr - pointer to remote buffer
// dwMetaDataSize - size of buffer.
// pLocalBuffer - holder to get local buffer.
//
// Returns:
// pLocalBuffer may be allocated.
// Throws on error (pLocalBuffer may contain garbage).
// Else if successful, pLocalBuffer contains local copy of metadata.
//
// Notes:
// This can copy metadata out for the dynamic case or the normal case.
// Uses an allocator (CoTaskMemHolder) that lets us hand off the memory to the metadata.
void CordbModule::CopyRemoteMetaData(
TargetBuffer buffer,
CoTaskMemHolder<VOID> * pLocalBuffer)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
_ASSERTE(pLocalBuffer != NULL);
_ASSERTE(!buffer.IsEmpty());
// Allocate space for the local copy of the metadata
// No need to zero out the memory since we'll fill it all here.
LPVOID pRawBuffer = CoTaskMemAlloc(buffer.cbSize);
if (pRawBuffer == NULL)
{
ThrowOutOfMemory();
}
pLocalBuffer->Assign(pRawBuffer);
// Copy the metadata from the left side
GetProcess()->SafeReadBuffer(buffer, (BYTE *)pRawBuffer);
return;
}
HRESULT CordbModule::QueryInterface(REFIID id, void **pInterface)
{
if (id == IID_ICorDebugModule)
{
*pInterface = static_cast<ICorDebugModule*>(this);
}
else if (id == IID_ICorDebugModule2)
{
*pInterface = static_cast<ICorDebugModule2*>(this);
}
else if (id == IID_ICorDebugModule3)
{
*pInterface = static_cast<ICorDebugModule3*>(this);
}
else if (id == IID_ICorDebugModule4)
{
*pInterface = static_cast<ICorDebugModule4*>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown*>(static_cast<ICorDebugModule*>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
HRESULT CordbModule::GetProcess(ICorDebugProcess **ppProcess)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess **);
*ppProcess = static_cast<ICorDebugProcess*> (GetProcess());
GetProcess()->ExternalAddRef();
return S_OK;
}
HRESULT CordbModule::GetBaseAddress(CORDB_ADDRESS *pAddress)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pAddress, CORDB_ADDRESS *);
*pAddress = m_PEBuffer.pAddress;
return S_OK;
}
HRESULT CordbModule::GetAssembly(ICorDebugAssembly **ppAssembly)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppAssembly, ICorDebugAssembly **);
*ppAssembly = static_cast<ICorDebugAssembly *> (m_pAssembly);
if (m_pAssembly != NULL)
{
m_pAssembly->ExternalAddRef();
}
return S_OK;
}
// Public implementation of ICorDebugModule::GetName,
// wrapper around code:GetNameWorker (which throws).
HRESULT CordbModule::GetName(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
EX_TRY
{
hr = GetNameWorker(cchName, pcchName, szName);
}
EX_CATCH_HRESULT(hr);
// GetNameWorker can use metadata. If it fails due to missing metadata, or if we fail to find expected
// target memory (dump debugging) then we should fall back to getting the file name without metadata.
if ((hr == CORDBG_E_MISSING_METADATA) ||
(hr == CORDBG_E_READVIRTUAL_FAILURE) ||
(hr == HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY)))
{
DWORD dwImageTimeStamp = 0; // unused
DWORD dwImageSize = 0; // unused
bool isNGEN = false;
StringCopyHolder filePath;
_ASSERTE(!m_vmPEFile.IsNull());
if (this->GetProcess()->GetDAC()->GetMetaDataFileInfoFromPEFile(m_vmPEFile,
dwImageTimeStamp,
dwImageSize,
isNGEN,
&filePath))
{
_ASSERTE(filePath.IsSet());
// Unfortunately, metadata lookup preferentially takes the ngen image - so in this case,
// we need to go back and get the IL image's name instead.
if ((isNGEN) &&
(this->GetProcess()->GetDAC()->GetILImageInfoFromNgenPEFile(m_vmPEFile,
dwImageTimeStamp,
dwImageSize,
&filePath)))
{
_ASSERTE(filePath.IsSet());
}
hr = CopyOutString(filePath, cchName, pcchName, szName);
}
}
}
PUBLIC_API_END(hr);
return hr;
}
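// Typical caller pattern for ICorDebugModule::GetName (a client-side sketch, not code from
// this file): query the required size first, then fetch the name.
//
//   ULONG32 cchNeeded = 0;
//   pModule->GetName(0, &cchNeeded, NULL);            // first call: get required length
//   WCHAR * pName = new WCHAR[cchNeeded];
//   pModule->GetName(cchNeeded, &cchNeeded, pName);   // second call: copy out the name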
//---------------------------------------------------------------------------------------
// Gets the module pretty name (may be filename or faked up name)
//
// Arguments:
// cchName - count of characters in the szName buffer on input.
// *pcchName - Optional Out parameter, which gets set to the fully requested size
// (not just how many characters are written).
// szName - buffer to get name.
//
// Returns:
// S_OK on success.
// S_FALSE if we fabricate the name.
// Return failing HR (on common errors) or Throw on exceptional errors.
//
// Note:
// Filename isn't necessarily the same as the module name in the metadata.
//
HRESULT CordbModule::GetNameWorker(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
HRESULT hr = S_OK;
const WCHAR * szTempName = NULL;
ALLOW_DATATARGET_MISSING_MEMORY(
szTempName = GetModulePath();
);
#if defined(FEATURE_DBGIPC_TRANSPORT_DI)
// To support VS when debugging remotely we act like the Compact Framework and return the assembly name
// when asked for the name of an in-memory module.
if (szTempName == NULL)
{
IMetaDataAssemblyImport *pAssemblyImport = NULL;
if (SUCCEEDED(hr = GetMetaDataImporter()->QueryInterface(IID_IMetaDataAssemblyImport, (void**)&pAssemblyImport)))
{
mdAssembly mda = TokenFromRid(1, mdtAssembly);
hr = pAssemblyImport->GetAssemblyProps(mda, // [IN] The Assembly for which to get the properties.
NULL, // [OUT] Pointer to the Originator blob.
NULL, // [OUT] Count of bytes in the Originator Blob.
NULL, // [OUT] Hash Algorithm.
szName, // [OUT] Buffer to fill with name.
cchName, // [IN] Size of buffer in wide chars.
(ULONG*)pcchName, // [OUT] Actual # of wide chars in name.
NULL, // [OUT] Assembly MetaData.
NULL); // [OUT] Flags.
pAssemblyImport->Release();
return hr;
}
// reset hr
hr = S_OK;
}
#endif // FEATURE_DBGIPC_TRANSPORT_DI
EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
{
StringCopyHolder buffer;
// If the module has no file name, then we'll fabricate a fake name
if (!szTempName)
{
// On MiniDumpNormal, if the debugger can't find the module then there's no way we will
// find metadata.
hr = HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY);
// Tempting to use the metadata-scope name, but that's a regression from Whidbey. For manifest modules,
// the metadata scope name is not initialized with the string the user supplied to create the
// dynamic assembly. So we call into the runtime to use CLR heuristics to get a more accurate name.
m_pProcess->GetDAC()->GetModuleSimpleName(m_vmModule, &buffer);
_ASSERTE(buffer.IsSet());
szTempName = buffer;
// Note that we considered returning S_FALSE for fabricated names like this, but that's a breaking
// change from Whidbey that is known to trigger bugs in VS. If a debugger wants to differentiate
// real path names from fake simple names, we'll just have to add a new API with the right semantics.
}
hr = CopyOutString(szTempName, cchName, pcchName, szName);
}
EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY
return hr;
}
//---------------------------------------------------------------------------------------
// Gets actual name of loaded module. (no faked names)
//
// Returns:
// string for full path to module name. This is a file that can be opened.
// NULL if name is not available (such as in some dynamic module cases)
// Throws if failed accessing target
//
// Notes:
// We avoid using the method name "GetModuleFileName" because winbase.h #defines that
// token (along with many others) to have an A or W suffix.
const WCHAR * CordbModule::GetModulePath()
{
// Lazily initialize. Module filenames cannot change, and so once
// we've retrieved this successfully, it's stored for good.
if (!m_strModulePath.IsSet())
{
IDacDbiInterface * pDac = m_pProcess->GetDAC(); // throws
pDac->GetModulePath(m_vmModule, &m_strModulePath); // throws
_ASSERTE(m_strModulePath.IsSet());
}
if (m_strModulePath.IsEmpty())
{
return NULL; // module has no filename
}
return m_strModulePath;
}
//---------------------------------------------------------------------------------------
// Get and caches ngen image path.
//
// Returns:
// Null-terminated string to ngen image path.
// NULL if there is no ngen filename (eg, file is not ngenned).
// Throws on error (such as inability to read the path from the target).
//
// Notes:
// This can be used to get the path to find metadata. For ngenned images,
// the IL (and associated metadata) may not be loaded, so we may want to get the
// metadata out of the ngen image.
const WCHAR * CordbModule::GetNGenImagePath()
{
HRESULT hr = S_OK;
EX_TRY
{
// Lazily initialize. Module filenames cannot change, and so once
// we've retrieved this successfully, it's stored for good.
if (!m_strNGenImagePath.IsSet())
{
IDacDbiInterface * pDac = m_pProcess->GetDAC(); // throws
BOOL fNonEmpty = pDac->GetModuleNGenPath(m_vmModule, &m_strNGenImagePath); // throws
(void)fNonEmpty; //prevent "unused variable" error from GCC
_ASSERTE(m_strNGenImagePath.IsSet() && (m_strNGenImagePath.IsEmpty() == !fNonEmpty));
}
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr) ||
m_strNGenImagePath == NULL ||
m_strNGenImagePath.IsEmpty())
{
return NULL; // module has no ngen filename
}
return m_strNGenImagePath;
}
// Implementation of ICorDebugModule::EnableJITDebugging
// See also code:CordbModule::SetJITCompilerFlags
HRESULT CordbModule::EnableJITDebugging(BOOL bTrackJITInfo, BOOL bAllowJitOpts)
{
// Leftside will enforce that this is a valid time to change jit flags.
// V1.0 behavior allowed setting these in the middle of a module's lifetime, which meant
// that different methods throughout the module may have been jitted differently.
// Since V2, this has to be set when the module is first loaded, before anything is jitted.
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
DWORD dwFlags = CORDEBUG_JIT_DEFAULT;
// Since V2, bTrackJITInfo is the default and cannot be turned off.
if (!bAllowJitOpts)
{
dwFlags |= CORDEBUG_JIT_DISABLE_OPTIMIZATION;
}
return SetJITCompilerFlags(dwFlags);
}
HRESULT CordbModule::EnableClassLoadCallbacks(BOOL bClassLoadCallbacks)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_ALLOW_LIVE_DO_STOPGO(GetProcess());
// You must receive ClassLoad callbacks for dynamic modules so that we can keep the metadata up-to-date on the Right
// Side. Therefore, we refuse to turn them off for all dynamic modules (they were forced on when the module was
// loaded on the Left Side.)
if (m_fDynamic && !bClassLoadCallbacks)
return E_INVALIDARG;
if (m_vmDomainAssembly.IsNull())
return E_UNEXPECTED;
// Send a Set Class Load Flag event to the left side. There is no need to wait for a response, and this can be
// called whether or not the process is synchronized.
CordbProcess *pProcess = GetProcess();
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event,
DB_IPCE_SET_CLASS_LOAD_FLAG,
false,
(GetAppDomain()->GetADToken()));
event.SetClassLoad.vmDomainAssembly = this->m_vmDomainAssembly;
event.SetClassLoad.flag = (bClassLoadCallbacks == TRUE);
HRESULT hr = pProcess->m_cordb->SendIPCEvent(pProcess, &event,
sizeof(DebuggerIPCEvent));
hr = WORST_HR(hr, event.hr);
return hr;
}
//-----------------------------------------------------------------------------
// Public implementation of ICorDebugModule::GetFunctionFromToken
// Get the CordbFunction matches this token / module pair.
// Each time a function is Enc-ed, it gets its own CordbFunction object.
// This will return the latest EnC version of the function for this Module,Token pair.
HRESULT CordbModule::GetFunctionFromToken(mdMethodDef token,
ICorDebugFunction **ppFunction)
{
// This is not reentrant. DBI should call code:CordbModule::LookupOrCreateFunctionLatestVersion instead.
PUBLIC_API_ENTRY(this);
ATT_ALLOW_LIVE_DO_STOPGO(GetProcess()); // @todo - can this be RequiredStop?
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);
HRESULT hr = S_OK;
EX_TRY
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// Check token is valid.
if ((token == mdMethodDefNil) ||
(TypeFromToken(token) != mdtMethodDef) ||
(!GetMetaDataImporter()->IsValidToken(token)))
{
ThrowHR(E_INVALIDARG);
}
CordbFunction * pFunction = LookupOrCreateFunctionLatestVersion(token);
*ppFunction = static_cast<ICorDebugFunction*> (pFunction);
pFunction->ExternalAddRef();
}
EX_CATCH_HRESULT(hr);
return hr;
}
HRESULT CordbModule::GetFunctionFromRVA(CORDB_ADDRESS rva,
ICorDebugFunction **ppFunction)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);
return E_NOTIMPL;
}
HRESULT CordbModule::LookupClassByToken(mdTypeDef token,
CordbClass **ppClass)
{
INTERNAL_API_ENTRY(this->GetProcess()); //
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
EX_TRY // @dbgtodo exceptions - push this up
{
*ppClass = NULL;
if ((token == mdTypeDefNil) || (TypeFromToken(token) != mdtTypeDef))
{
ThrowHR(E_INVALIDARG);
}
RSLockHolder lockHolder(GetProcess()->GetProcessLock()); // @dbgtodo synchronization - Push this up
CordbClass *pClass = m_classes.GetBase(token);
if (pClass == NULL)
{
// Validate the token.
if (!GetMetaDataImporter()->IsValidToken(token))
{
ThrowHR(E_INVALIDARG);
}
RSInitHolder<CordbClass> pClassInit(new CordbClass(this, token));
pClass = pClassInit.TransferOwnershipToHash(&m_classes);
}
*ppClass = pClass;
}
EX_CATCH_HRESULT(hr);
return hr;
}
HRESULT CordbModule::GetClassFromToken(mdTypeDef token,
ICorDebugClass **ppClass)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_ALLOW_LIVE_DO_STOPGO(this->GetProcess()); // @todo - could this be RequiredStopped?
VALIDATE_POINTER_TO_OBJECT(ppClass, ICorDebugClass **);
HRESULT hr = S_OK;
EX_TRY
{
CordbClass *pClass = NULL;
*ppClass = NULL;
// Validate the token.
if (!GetMetaDataImporter()->IsValidToken(token))
{
ThrowHR(E_INVALIDARG);
}
hr = LookupClassByToken(token, &pClass);
IfFailThrow(hr);
*ppClass = static_cast<ICorDebugClass*> (pClass);
pClass->ExternalAddRef();
}
EX_CATCH_HRESULT(hr);
return hr;
}
HRESULT CordbModule::CreateBreakpoint(ICorDebugModuleBreakpoint **ppBreakpoint)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppBreakpoint, ICorDebugModuleBreakpoint **);
return E_NOTIMPL;
}
//
// Return the token for the Module table entry for this object. The token
// may then be passed to the meta data import api's.
//
HRESULT CordbModule::GetToken(mdModule *pToken)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pToken, mdModule *);
HRESULT hr = S_OK;
EX_TRY
{
hr = GetMetaDataImporter()->GetModuleFromScope(pToken);
IfFailThrow(hr);
}
EX_CATCH_HRESULT(hr);
return hr;
}
// public implementation for ICorDebugModule::GetMetaDataInterface
// Return a meta data interface pointer that can be used to examine the
// meta data for this module.
HRESULT CordbModule::GetMetaDataInterface(REFIID riid, IUnknown **ppObj)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppObj, IUnknown **);
HRESULT hr = S_OK;
EX_TRY
{
// QI the importer that we already have and return the result.
hr = GetMetaDataImporter()->QueryInterface(riid, (void**)ppObj);
IfFailThrow(hr);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//-----------------------------------------------------------------------------
// LookupFunctionLatestVersion finds the latest cached version of an existing CordbFunction
// in the given module. If the function doesn't exist, it returns NULL.
//
// Arguments:
// funcMetaDataToken - methoddef token for function to lookup
//
//
// Notes:
// If no CordbFunction instance was cached, then this returns NULL.
// use code:CordbModule::LookupOrCreateFunctionLatestVersion to do a lookup that will
// populate the cache if needed.
CordbFunction* CordbModule::LookupFunctionLatestVersion(mdMethodDef funcMetaDataToken)
{
INTERNAL_API_ENTRY(this);
return m_functions.GetBase(funcMetaDataToken);
}
//-----------------------------------------------------------------------------
// Lookup (or create) the CordbFunction for the latest EnC version.
//
// Arguments:
// funcMetaDataToken - methoddef token for function to lookup
//
// Returns:
// CordbFunction instance for that token. This will create an instance if needed, and so never returns null.
// Throws on critical error.
//
// Notes:
// This creates the latest EnC version. Use code:CordbModule::LookupOrCreateFunction to do an
// enc-version aware function lookup.
//
CordbFunction* CordbModule::LookupOrCreateFunctionLatestVersion(mdMethodDef funcMetaDataToken)
{
INTERNAL_API_ENTRY(this);
CordbFunction * pFunction = m_functions.GetBase(funcMetaDataToken);
if (pFunction != NULL)
{
return pFunction;
}
// EnC adds each version to the hash. So if the hash lookup fails, then it must not be an EnC case,
// and so we can use the default version number.
return CreateFunction(funcMetaDataToken, CorDB_DEFAULT_ENC_FUNCTION_VERSION);
}
//-----------------------------------------------------------------------------
// LookupOrCreateFunction finds an existing version of CordbFunction in the given module.
// If the function doesn't exist, it creates it.
//
// The outgoing function is not yet fully initialized. For example, the Class field is not set.
// However, ICorDebugFunction::GetClass() will check that and lazily initialize the field.
//
// Throws on error.
//
CordbFunction * CordbModule::LookupOrCreateFunction(mdMethodDef funcMetaDataToken, SIZE_T enCVersion)
{
INTERNAL_API_ENTRY(this);
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
CordbFunction * pFunction = m_functions.GetBase(funcMetaDataToken);
// Special-case non-existence since we also need to add the new function to the hash table.
if (pFunction == NULL)
{
// EnC adds each version to the hash. So if the hash lookup fails,
// then it must not be an EnC case.
return CreateFunction(funcMetaDataToken, enCVersion);
}
// The linked list is sorted with the most recent version at the front. Version numbers correspond
// to the actual edit count against the module, so they are not necessarily contiguous.
// Any valid EnC version must already exist, since we would have created it during ApplyChanges.
for (CordbFunction *pf=pFunction; pf != NULL; pf = pf->GetPrevVersion())
{
if (pf->GetEnCVersionNumber() == enCVersion)
{
return pf;
}
}
_ASSERTE(!"Couldn't find EnC version of function\n");
ThrowHR(E_FAIL);
}
HRESULT CordbModule::IsDynamic(BOOL *pDynamic)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pDynamic, BOOL *);
(*pDynamic) = m_fDynamic;
return S_OK;
}
BOOL CordbModule::IsDynamic()
{
return m_fDynamic;
}
HRESULT CordbModule::IsInMemory(BOOL *pInMemory)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pInMemory, BOOL *);
(*pInMemory) = m_fInMemory;
return S_OK;
}
HRESULT CordbModule::GetGlobalVariableValue(mdFieldDef fieldDef,
ICorDebugValue **ppValue)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppValue, ICorDebugValue **);
ATT_REQUIRE_STOPPED_MAY_FAIL(this->GetProcess());
HRESULT hr = S_OK;
EX_TRY
{
if (m_pClass == NULL)
{
CordbClass * pGlobalClass = NULL;
hr = LookupClassByToken(COR_GLOBAL_PARENT_TOKEN, &pGlobalClass);
IfFailThrow(hr);
m_pClass.Assign(pGlobalClass);
_ASSERTE(m_pClass != NULL);
}
hr = m_pClass->GetStaticFieldValue(fieldDef, NULL, ppValue);
IfFailThrow(hr);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//
// CreateFunction creates a new function from the given information and
// adds it to the module.
//
CordbFunction * CordbModule::CreateFunction(mdMethodDef funcMetaDataToken, SIZE_T enCVersion)
{
INTERNAL_API_ENTRY(this);
// In EnC cases, the token may not yet be valid. We may be caching the CordbFunction
// for a token for an added method before the metadata is updated on the RS.
// We rely that our caller has done token validation.
// Create a new CordbFunction object or throw.
RSInitHolder<CordbFunction> pFunction(new CordbFunction(this, funcMetaDataToken, enCVersion)); // throws
CordbFunction * pCopy = pFunction.TransferOwnershipToHash(&m_functions);
return pCopy;
}
#ifdef EnC_SUPPORTED
//---------------------------------------------------------------------------------------
//
// Creates a new CordbFunction object to represent this new version of a function and
// updates the module's function collection to mark this as the latest version.
//
// Arguments:
// funcMetaDataToken - the functions methodDef token in this module
// enCVerison - The new version number of this function
// ppFunction - Output param for the new instance - optional
//
// Assumptions:
// Assumes the specified version of this function doesn't already exist (i.e. enCVersion
// is newer than all existing versions).
//
HRESULT CordbModule::UpdateFunction(mdMethodDef funcMetaDataToken,
SIZE_T enCVersion,
CordbFunction** ppFunction)
{
INTERNAL_API_ENTRY(this);
if (ppFunction)
*ppFunction = NULL;
_ASSERTE(funcMetaDataToken);
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// pOldVersion is the 2nd newest version
CordbFunction* pOldVersion = LookupFunctionLatestVersion(funcMetaDataToken);
// If we don't have an old version, create a default-versioned one, since we will most likely
// go looking for it later and it is easier to add it now than to insert it later.
if (!pOldVersion)
{
LOG((LF_ENC, LL_INFO10000, "CM::UF: adding %8.8x with version %d\n", funcMetaDataToken, enCVersion));
HRESULT hr = S_OK;
EX_TRY
{
pOldVersion = CreateFunction(funcMetaDataToken, CorDB_DEFAULT_ENC_FUNCTION_VERSION);
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
return hr;
}
}
// This method should not be called for versions that already exist
_ASSERTE( enCVersion > pOldVersion->GetEnCVersionNumber());
LOG((LF_ENC, LL_INFO10000, "CM::UF: updating %8.8x with version %d\n", funcMetaDataToken, enCVersion));
// Create a new function object.
CordbFunction * pNewVersion = new (nothrow) CordbFunction(this, funcMetaDataToken, enCVersion);
if (pNewVersion == NULL)
return E_OUTOFMEMORY;
// Chain the 2nd most recent version onto this instance (this will internal addref).
pNewVersion->SetPrevVersion(pOldVersion);
// Add the function to the Module's hash of all functions.
HRESULT hr = m_functions.SwapBase(pOldVersion, pNewVersion);
if (FAILED(hr))
{
delete pNewVersion;
return hr;
}
// Do cleanup for function which is no longer the latest version
pNewVersion->GetPrevVersion()->MakeOld();
if (ppFunction)
*ppFunction = pNewVersion;
return hr;
}
#endif // EnC_SUPPORTED
HRESULT CordbModule::LookupOrCreateClass(mdTypeDef classMetaDataToken,CordbClass** ppClass)
{
INTERNAL_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
RSLockHolder lockHolder(GetProcess()->GetProcessLock()); // @dbgtodo exceptions synchronization-
// Push this lock up, convert to exceptions.
HRESULT hr = S_OK;
*ppClass = LookupClass(classMetaDataToken);
if (*ppClass == NULL)
{
hr = CreateClass(classMetaDataToken,ppClass);
if (!SUCCEEDED(hr))
{
return hr;
}
_ASSERTE(*ppClass != NULL);
}
return hr;
}
//
// LookupClass finds an existing CordbClass in the given module.
// If the class doesn't exist, it returns NULL.
//
CordbClass* CordbModule::LookupClass(mdTypeDef classMetaDataToken)
{
INTERNAL_API_ENTRY(this);
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
return m_classes.GetBase(classMetaDataToken);
}
//
// CreateClass creates a new class from the given information and
// adds it to the module.
//
HRESULT CordbModule::CreateClass(mdTypeDef classMetaDataToken,
CordbClass** ppClass)
{
INTERNAL_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
CordbClass* pClass = new (nothrow) CordbClass(this, classMetaDataToken);
if (pClass == NULL)
return E_OUTOFMEMORY;
HRESULT hr = m_classes.AddBase(pClass);
if (SUCCEEDED(hr))
{
*ppClass = pClass;
if (classMetaDataToken == COR_GLOBAL_PARENT_TOKEN)
{
_ASSERTE( m_pClass == NULL ); //redundant create
m_pClass.Assign(pClass);
}
}
else
{
delete pClass;
}
return hr;
}
// Resolve a type-ref from this module to a CordbClass
//
// Arguments:
// token - a Type Ref in this module's scope.
// ppClass - out parameter to get the class we resolve to.
//
// Returns:
// S_OK on success.
// CORDBG_E_CLASS_NOT_LOADED if the TypeRef is not yet resolved because the type it refers
// to is not yet loaded.
//
// Notes:
// In general, a TypeRef refers to a type in another module. (Although as a corner case, it could
// refer to this module too). This resolves a TypeRef within the current module's scope to a
// (TypeDef, metadata scope), which is in turn encapsulated as a CordbClass.
//
// A TypeRef has a resolution scope (ModuleRef or AssemblyRef) and string name for the type
// within that scope. Resolving means:
// 1. Determining the actual metadata scope loaded for the resolution scope.
// See also code:CordbModule::ResolveAssemblyInternal
// If the resolved module hasn't been loaded yet, the resolution will fail.
// 2. Doing a string lookup of the TypeRef's name within that resolved scope to find the TypeDef.
// 3. Returning the (resolved scope, TypeDef) pair.
//
HRESULT CordbModule::ResolveTypeRef(mdTypeRef token, CordbClass **ppClass)
{
FAIL_IF_NEUTERED(this);
INTERNAL_SYNC_API_ENTRY(GetProcess()); //
CordbProcess * pProcess = GetProcess();
_ASSERTE((pProcess->GetShim() == NULL) || pProcess->GetSynchronized());
if ((token == mdTypeRefNil) || (TypeFromToken(token) != mdtTypeRef))
{
return E_INVALIDARG;
}
if (m_vmDomainAssembly.IsNull() || m_pAppDomain == NULL)
{
return E_UNEXPECTED;
}
HRESULT hr = S_OK;
*ppClass = NULL;
EX_TRY
{
TypeRefData inData = {m_vmDomainAssembly, token};
TypeRefData outData;
{
RSLockHolder lockHolder(pProcess->GetProcessLock());
pProcess->GetDAC()->ResolveTypeReference(&inData, &outData);
}
CordbModule * pModule = m_pAppDomain->LookupOrCreateModule(outData.vmDomainAssembly);
IfFailThrow(pModule->LookupClassByToken(outData.typeToken, ppClass));
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbModule::ResolveTypeRef
// Resolve a type ref or def to a CordbClass
//
// Arguments:
// token - a mdTypeDef or mdTypeRef in this module's scope to be resolved
// ppClass - out parameter to get the CordbClass for this type
//
// Notes:
// See code:CordbModule::ResolveTypeRef for more details.
HRESULT CordbModule::ResolveTypeRefOrDef(mdToken token, CordbClass **ppClass)
{
FAIL_IF_NEUTERED(this);
INTERNAL_SYNC_API_ENTRY(this->GetProcess()); //
if ((token == mdTypeRefNil) ||
(TypeFromToken(token) != mdtTypeRef && TypeFromToken(token) != mdtTypeDef))
return E_INVALIDARG;
if (TypeFromToken(token)==mdtTypeRef)
{
// It's a type-ref. That means the type is defined in another module.
// That other module is determined at runtime by Fusion / Loader policy. So we need to
// ultimately ask the runtime which module was actually loaded.
return ( ResolveTypeRef(token, ppClass) );
}
else
{
// It's a type-def. This is the easy case because the type is defined in this same module.
return ( LookupClassByToken(token, ppClass) );
}
}
//
// GetSize returns the size of the module.
//
HRESULT CordbModule::GetSize(ULONG32 *pcBytes)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pcBytes, ULONG32 *);
*pcBytes = m_PEBuffer.cbSize;
return S_OK;
}
CordbAssembly *CordbModule::GetCordbAssembly()
{
INTERNAL_API_ENTRY(this);
return m_pAssembly;
}
// This is legacy from the aborted V1 EnC attempt - not used in V2 EnC support
HRESULT CordbModule::GetEditAndContinueSnapshot(
ICorDebugEditAndContinueSnapshot **ppEditAndContinueSnapshot)
{
return E_NOTIMPL;
}
//---------------------------------------------------------------------------------------
//
// Requests that an edit be applied to the module for edit and continue and updates
// the right-side state and metadata.
//
// Arguments:
// cbMetaData - number of bytes in pbMetaData
// pbMetaData - a delta metadata blob describing the metadata edits to be made
// cbIL - number of bytes in pbIL
// pbIL - a new method body stream containing all of the method body information
// (IL, EH info, etc) for edited and added methods.
//
// Return Value:
// S_OK on success, various errors on failure
//
// Notes:
//
//
// This applies the same changes to the RS's copy of the metadata that the left-side will apply to
// its copy of the metadata. See code:EditAndContinueModule::ApplyEditAndContinue
//
HRESULT CordbModule::ApplyChanges(ULONG cbMetaData,
BYTE pbMetaData[],
ULONG cbIL,
BYTE pbIL[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
#ifdef FEATURE_ENC_SUPPORTED
// We enable EnC back in code:CordbModule::SetJITCompilerFlags.
// If EnC isn't enabled, then we'll fail in the LS when we try to ApplyChanges.
// We'd expect a well-behaved debugger to never actually land here.
LOG((LF_CORDB,LL_INFO10000, "CP::AC: applying changes"));
VALIDATE_POINTER_TO_OBJECT_ARRAY(pbMetaData,
BYTE,
cbMetaData,
true,
true);
VALIDATE_POINTER_TO_OBJECT_ARRAY(pbIL,
BYTE,
cbIL,
true,
true);
HRESULT hr;
RSExtSmartPtr<IUnknown> pUnk;
RSExtSmartPtr<IMDInternalImport> pMDImport;
RSExtSmartPtr<IMDInternalImport> pMDImport2;
//
// Edit was successful - update the right-side state to reflect the edit
//
++m_EnCCount;
// apply the changes to our copy of the metadata
_ASSERTE(m_pIMImport != NULL); // must have metadata at this point in EnC
IfFailGo(m_pIMImport->QueryInterface(IID_IUnknown, (void**)&pUnk));
IfFailGo(GetMDInternalInterfaceFromPublic(pUnk, IID_IMDInternalImport,
(void **)&pMDImport));
// The left-side will call this same method on its copy of the metadata.
hr = pMDImport->ApplyEditAndContinue(pbMetaData, cbMetaData, &pMDImport2);
if (pMDImport2 != NULL)
{
// ApplyEditAndContinue() expects IMDInternalImport**, but we give it RSExtSmartPtr<IMDInternalImport>
// Silent cast of RSExtSmartPtr to IMDInternalImport* leads to assignment of a raw pointer
// without calling AddRef(), thus we need to do it manually.
// @todo - ApplyEditAndContinue should probably AddRef the out parameter.
pMDImport2->AddRef();
}
IfFailGo(hr);
// We're about to get a new importer object, so release the old one.
m_pIMImport.Clear();
IfFailGo(GetMDPublicInterfaceFromInternal(pMDImport2, IID_IMetaDataImport, (void **)&m_pIMImport));
// set the new RVA value
// Send the delta over to the debuggee and request that it apply the edit
IfFailGo( ApplyChangesInternal(cbMetaData, pbMetaData, cbIL, pbIL) );
EX_TRY
{
m_pInternalMetaDataImport.Clear();
UpdateInternalMetaData();
}
EX_CATCH_HRESULT(hr);
_ASSERTE(SUCCEEDED(hr));
ErrExit:
// MetaData interface pointers will be automatically released via SmartPtr dtors.
// @todo : prevent further execution of program
return hr;
#else
return E_NOTIMPL;
#endif
}
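// Illustrative, hypothetical caller-side sketch (not part of this file): a debugger that has
// produced an EnC metadata delta (pbMetaData/cbMetaData) and IL delta (pbIL/cbIL), e.g. from
// an EnC-capable compiler, applies it while the process is stopped. pModule2 stands for an
// ICorDebugModule2* the debugger already holds for this module.
//
//     HRESULT hr = pModule2->ApplyChanges(cbMetaData, pbMetaData, cbIL, pbIL);
//     // On success, the importer returned by GetMetaDataInterface already reflects the edit.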
//---------------------------------------------------------------------------------------
//
// Requests that an edit be applied to the module for edit and continue and updates
// some right-side state, but does not update our copy of the metadata.
//
// Arguments:
// cbMetaData - number of bytes in pbMetaData
// pbMetaData - a delta metadata blob describing the metadata edits to be made
// cbIL - number of bytes in pbIL
// pbIL - a new method body stream containing all of the method body information
// (IL, EH info, etc) for edited and added methods.
//
// Return Value:
// S_OK on success, various errors on failure
//
HRESULT CordbModule::ApplyChangesInternal(ULONG cbMetaData,
BYTE pbMetaData[],
ULONG cbIL,
BYTE pbIL[])
{
CONTRACTL
{
NOTHROW;
}
CONTRACTL_END;
LOG((LF_ENC,LL_INFO100, "CordbModule::ApplyChangesInternal\n"));
FAIL_IF_NEUTERED(this);
INTERNAL_SYNC_API_ENTRY(this->GetProcess()); //
if (m_vmDomainAssembly.IsNull())
return E_UNEXPECTED;
#ifdef FEATURE_ENC_SUPPORTED
HRESULT hr;
void * pRemoteBuf = NULL;
EX_TRY
{
// Create and initialize the event as synchronous
// We'll be sending a NULL appdomain pointer since the individual modules
// will contain pointers to their respective A.D.s
DebuggerIPCEvent event;
GetProcess()->InitIPCEvent(&event, DB_IPCE_APPLY_CHANGES, false, VMPTR_AppDomain::NullPtr());
event.ApplyChanges.vmDomainAssembly = this->m_vmDomainAssembly;
// Have the left-side create a buffer for us to store the delta into
ULONG cbSize = cbMetaData+cbIL;
TargetBuffer tbFull = GetProcess()->GetRemoteBuffer(cbSize);
pRemoteBuf = CORDB_ADDRESS_TO_PTR(tbFull.pAddress);
TargetBuffer tbMetaData = tbFull.SubBuffer(0, cbMetaData); // 1st half
TargetBuffer tbIL = tbFull.SubBuffer(cbMetaData); // 2nd half
// Copy the delta metadata and IL over to the debuggee
GetProcess()->SafeWriteBuffer(tbMetaData, pbMetaData); // throws
GetProcess()->SafeWriteBuffer(tbIL, pbIL); // throws
// Send a synchronous event requesting the debuggee apply the edit
event.ApplyChanges.pDeltaMetadata = tbMetaData.pAddress;
event.ApplyChanges.cbDeltaMetadata = tbMetaData.cbSize;
event.ApplyChanges.pDeltaIL = tbIL.pAddress;
event.ApplyChanges.cbDeltaIL = tbIL.cbSize;
LOG((LF_ENC,LL_INFO100, "CordbModule::ApplyChangesInternal sending event\n"));
hr = GetProcess()->SendIPCEvent(&event, sizeof(event));
hr = WORST_HR(hr, event.hr);
IfFailThrow(hr);
// Allocate space for the return event.
// We always copy over the whole buffer size, which is bigger than sizeof(DebuggerIPCEvent).
// This seems ugly; in this case we know the exact size of the event we want to read,
// so why copy over all the extra data?
DebuggerIPCEvent *retEvent = (DebuggerIPCEvent *) _alloca(CorDBIPC_BUFFER_SIZE);
{
//
// Wait for events to return from the RC. We expect zero or more add field,
// add function or update function events and one completion event.
//
while (TRUE)
{
hr = GetProcess()->m_cordb->WaitForIPCEventFromProcess(GetProcess(),
GetAppDomain(),
retEvent);
IfFailThrow(hr);
if (retEvent->type == DB_IPCE_APPLY_CHANGES_RESULT)
{
// Done receiving update events
hr = retEvent->ApplyChangesResult.hr;
LOG((LF_CORDB, LL_INFO1000, "[%x] RCET::DRCE: EnC apply changes result %8.8x.\n", GetCurrentThreadId(), hr));
break;
}
_ASSERTE(retEvent->type == DB_IPCE_ENC_UPDATE_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FIELD);
LOG((LF_CORDB, LL_INFO1000, "[%x] RCET::DRCE: EnC %s %8.8x to version %d.\n",
GetCurrentThreadId(),
retEvent->type == DB_IPCE_ENC_UPDATE_FUNCTION ? "Update function" :
retEvent->type == DB_IPCE_ENC_ADD_FUNCTION ? "Add function" : "Add field",
retEvent->EnCUpdate.memberMetadataToken, retEvent->EnCUpdate.newVersionNumber));
CordbAppDomain *pAppDomain = GetAppDomain();
_ASSERTE(NULL != pAppDomain);
CordbModule* pModule = NULL;
pModule = pAppDomain->LookupOrCreateModule(retEvent->EnCUpdate.vmDomainAssembly); // throws
_ASSERTE(pModule != NULL);
// update to the newest version
if (retEvent->type == DB_IPCE_ENC_UPDATE_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FUNCTION)
{
// Update the function collection to reflect this edit
hr = pModule->UpdateFunction(retEvent->EnCUpdate.memberMetadataToken, retEvent->EnCUpdate.newVersionNumber, NULL);
}
// mark the class and relevant type as old so we update it next time we try to query it
if (retEvent->type == DB_IPCE_ENC_ADD_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FIELD)
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock()); // @dbgtodo synchronization - push this up
CordbClass* pClass = pModule->LookupClass(retEvent->EnCUpdate.classMetadataToken);
// If we don't find the class, that is fine: it hasn't been loaded yet, so it doesn't
// need to be updated.
if (pClass)
{
pClass->MakeOld();
}
}
}
}
LOG((LF_ENC,LL_INFO100, "CordbModule::ApplyChangesInternal complete.\n"));
}
EX_CATCH_HRESULT(hr);
// The process may have gone away by the time we get here, so don't assume it is still there.
CordbProcess *pProcess = GetProcess();
if (pProcess)
{
HRESULT hr2 = pProcess->ReleaseRemoteBuffer(&pRemoteBuf);
TESTANDRETURNHR(hr2);
}
return hr;
#else // FEATURE_ENC_SUPPORTED
return E_NOTIMPL;
#endif // FEATURE_ENC_SUPPORTED
}
// Set the JMC status for the entire module.
// All methods specified in others[] will have jmc status !fIsUserCode
// All other methods will have jmc status fIsUserCode.
HRESULT CordbModule::SetJMCStatus(
BOOL fIsUserCode,
ULONG32 cOthers,
mdToken others[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
if (m_vmDomainAssembly.IsNull())
return E_UNEXPECTED;
// @todo -allow the other parameters. These are functions that have default status
// opposite of fIsUserCode.
if (cOthers != 0)
{
_ASSERTE(!"not yet impl for cOthers != 0");
return E_NOTIMPL;
}
// Send event to the LS.
CordbProcess* pProcess = this->GetProcess();
_ASSERTE(pProcess != NULL);
// Tell the LS that this module is/is not user code
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event, DB_IPCE_SET_MODULE_JMC_STATUS, true, this->GetAppDomain()->GetADToken());
event.SetJMCFunctionStatus.vmDomainAssembly = m_vmDomainAssembly;
event.SetJMCFunctionStatus.dwStatus = fIsUserCode;
// Note: two-way event here...
HRESULT hr = pProcess->m_cordb->SendIPCEvent(pProcess, &event, sizeof(DebuggerIPCEvent));
// Stop now if we can't even send the event.
if (!SUCCEEDED(hr))
{
LOG((LF_CORDB, LL_INFO10, "CordbModule::SetJMCStatus failed 0x%08x...\n", hr));
return hr;
}
_ASSERTE(event.type == DB_IPCE_SET_MODULE_JMC_STATUS_RESULT);
LOG((LF_CORDB, LL_INFO10, "returning from CordbModule::SetJMCStatus 0x%08x...\n", hr));
return event.hr;
}
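// Illustrative, hypothetical usage sketch (not part of this file): a "Just My Code" debugger
// typically marks framework modules as non-user code right after their LoadModule callback.
// pModule2 stands for an ICorDebugModule2* the debugger already holds for this module.
//
//     // cOthers must be 0 for now (see the E_NOTIMPL check above).
//     HRESULT hr = pModule2->SetJMCStatus(FALSE /* fIsUserCode */, 0, NULL);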
//
// Resolve an assembly given an AssemblyRef token. Note that
// this will not trigger the loading of the assembly. If the assembly is not yet loaded,
// this will return a CORDBG_E_CANNOT_RESOLVE_ASSEMBLY error.
//
HRESULT CordbModule::ResolveAssembly(mdToken tkAssemblyRef,
ICorDebugAssembly **ppAssembly)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(this->GetProcess());
if(ppAssembly)
{
*ppAssembly = NULL;
}
HRESULT hr = S_OK;
EX_TRY
{
CordbAssembly *pCordbAsm = ResolveAssemblyInternal(tkAssemblyRef);
if (pCordbAsm == NULL)
{
// Don't throw here. It's a common-case failure path and not exceptional.
hr = CORDBG_E_CANNOT_RESOLVE_ASSEMBLY;
}
else if(ppAssembly)
{
_ASSERTE(pCordbAsm != NULL);
*ppAssembly = pCordbAsm;
pCordbAsm->ExternalAddRef();
}
}
EX_CATCH_HRESULT(hr);
return hr;
}
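// Illustrative, hypothetical usage sketch (not part of this file): resolving an mdAssemblyRef
// taken from this module's metadata. pModule2 stands for an ICorDebugModule2* the debugger
// already holds; CORDBG_E_CANNOT_RESOLVE_ASSEMBLY just means the target assembly is not
// loaded yet, so debuggers usually retry on a later stop.
//
//     ICorDebugAssembly *pReferencedAssembly = NULL;
//     HRESULT hr = pModule2->ResolveAssembly(tkAssemblyRef, &pReferencedAssembly);
//     if (SUCCEEDED(hr))
//     {
//         // ... inspect the referenced assembly ...
//         pReferencedAssembly->Release();
//     }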
//---------------------------------------------------------------------------------------
// Worker to resolve an assembly ref.
//
// Arguments:
// tkAssemblyRef - token of assembly ref to resolve
//
// Returns:
// Assembly that this token resolves to.
// NULL if it's a valid token but the assembly has not yet been resolved.
// (This is a non-exceptional error case).
//
// Notes:
// MetaData has tokens to represent a reference to another assembly.
// But Loader/Fusion policy ultimately decides which specific assembly is actually loaded
// for that token.
// This does the lookup of actual assembly and reports back to the debugger.
CordbAssembly * CordbModule::ResolveAssemblyInternal(mdToken tkAssemblyRef)
{
INTERNAL_SYNC_API_ENTRY(GetProcess()); //
if (TypeFromToken(tkAssemblyRef) != mdtAssemblyRef || tkAssemblyRef == mdAssemblyRefNil)
{
// Not a valid token
ThrowHR(E_INVALIDARG);
}
CordbAssembly * pAssembly = NULL;
if (!m_vmDomainAssembly.IsNull())
{
// Get DAC to do the real work to resolve the assembly
VMPTR_DomainAssembly vmDomainAssembly = GetProcess()->GetDAC()->ResolveAssembly(m_vmDomainAssembly, tkAssemblyRef);
// now find the ICorDebugAssembly corresponding to it
if (!vmDomainAssembly.IsNull() && m_pAppDomain != NULL)
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// Don't throw here because if the lookup fails, we want to throw CORDBG_E_CANNOT_RESOLVE_ASSEMBLY.
pAssembly = m_pAppDomain->LookupOrCreateAssembly(vmDomainAssembly);
}
}
return pAssembly;
}
//
// CreateReaderForInMemorySymbols - create an ISymUnmanagedReader object for symbols
// which are loaded into memory in the CLR. See interface definition in cordebug.idl for
// details.
//
HRESULT CordbModule::CreateReaderForInMemorySymbols(REFIID riid, void** ppObj)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
HRESULT hr = S_OK;
EX_TRY
{
// Get the symbol memory in a stream to give to the reader.
ReleaseHolder<IStream> pStream;
IDacDbiInterface::SymbolFormat symFormat = GetInMemorySymbolStream(&pStream);
// First create the symbol binder corresponding to the format of the stream
ReleaseHolder<ISymUnmanagedBinder> pBinder;
if (symFormat == IDacDbiInterface::kSymbolFormatPDB)
{
#ifndef TARGET_UNIX
// PDB format - use diasymreader.dll with COM activation
InlineSString<_MAX_PATH> ssBuf;
IfFailThrow(GetClrModuleDirectory(ssBuf));
IfFailThrow(FakeCoCreateInstanceEx(CLSID_CorSymBinder_SxS,
ssBuf.GetUnicode(),
IID_ISymUnmanagedBinder,
(void**)&pBinder,
NULL));
#else
IfFailThrow(FakeCoCreateInstance(CLSID_CorSymBinder_SxS,
IID_ISymUnmanagedBinder,
(void**)&pBinder));
#endif
}
else
{
// No in-memory symbols, return the appropriate error
_ASSERTE(symFormat == IDacDbiInterface::kSymbolFormatNone);
if (m_fDynamic || m_fInMemory)
{
// This is indeed an in-memory or dynamic module, we just don't have any symbols for it.
// This means the application didn't supply any, or they are not yet available. Symbols
// first become available at LoadClass time for dynamic modules and UpdateModuleSymbols
// time for non-dynamic in-memory modules.
ThrowHR(CORDBG_E_SYMBOLS_NOT_AVAILABLE);
}
// This module is on disk - the debugger should use its normal symbol-loading logic.
ThrowHR(CORDBG_E_MODULE_LOADED_FROM_DISK);
}
// In the attach or dump case, if we attach or take the dump after we have defined a dynamic module, we may
// have already set the symbol format to "PDB" by the time we call CreateReaderForInMemorySymbols during initialization
// for loaded modules. (In the launch case, we do this initialization when the module is actually loaded, and before we
// set the symbol format.) When we call CreateReaderForInMemorySymbols, we can't assume the initialization was already
// performed or specifically, that we already have m_pIMImport initialized. We can't call into diasymreader with a NULL
// pointer as the value for m_pIMImport, so we need to check that here.
if (m_pIMImport == NULL)
{
ThrowHR(CORDBG_E_SYMBOLS_NOT_AVAILABLE);
}
// Now create the symbol reader from the data
ReleaseHolder<ISymUnmanagedReader> pReader;
IfFailThrow(pBinder->GetReaderFromStream(m_pIMImport, pStream, &pReader));
// Attempt to return the interface requested
// Note that this does an AddRef for our return value ppObj, so we don't suppress the release
// of the pReader holder.
IfFailThrow(pReader->QueryInterface(riid, ppObj));
}
EX_CATCH_HRESULT(hr);
return hr;
}
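// Illustrative, hypothetical usage sketch (not part of this file): obtaining a symbol reader
// for a dynamic module. pModule3 stands for an ICorDebugModule3* QI'd from the module.
//
//     ISymUnmanagedReader *pSymReader = NULL;
//     HRESULT hr = pModule3->CreateReaderForInMemorySymbols(IID_ISymUnmanagedReader,
//                                                           (void **)&pSymReader);
//     if (hr == CORDBG_E_SYMBOLS_NOT_AVAILABLE)        { /* no in-memory symbols (yet) */ }
//     else if (hr == CORDBG_E_MODULE_LOADED_FROM_DISK) { /* fall back to normal PDB lookup */ }
//     else if (SUCCEEDED(hr))                          { /* use pSymReader, then Release() */ }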
/* ------------------------------------------------------------------------- *
* Class class
* ------------------------------------------------------------------------- */
//---------------------------------------------------------------------------------------
// Set the continue counter that marks when the module is in its Load event
//
// Notes:
// Jit flags can only be changed in the real module Load event. We may
// have multiple module load events on different threads coming at the
// same time. So each module load tracks its continue counter.
//
// This can be used by code:CordbModule::EnsureModuleIsInLoadCallback to
// properly return CORDBG_E_MUST_BE_IN_LOAD_MODULE
void CordbModule::SetLoadEventContinueMarker()
{
// Well behaved targets should only set this once.
GetProcess()->TargetConsistencyCheck(m_nLoadEventContinueCounter == 0);
m_nLoadEventContinueCounter = GetProcess()->m_continueCounter;
}
//---------------------------------------------------------------------------------------
// Return CORDBG_E_MUST_BE_IN_LOAD_MODULE if the module is not in the load module callback.
//
// Notes:
// The comparison is done via continue counters. The counter of the load
// event is cached via code:CordbModule::SetLoadEventContinueMarker.
//
// This state is currently stored on the RS. Alternatively, it could likely be retrieved from the LS state as
// well. One disadvantage of the current model is that if we detach during the load-module callback and
// then reattach, the RS state is flushed and we lose the fact that we can toggle the jit flags.
HRESULT CordbModule::EnsureModuleIsInLoadCallback()
{
if (this->m_nLoadEventContinueCounter < GetProcess()->m_continueCounter)
{
return CORDBG_E_MUST_BE_IN_LOAD_MODULE;
}
else
{
return S_OK;
}
}
// Implementation of ICorDebugModule2::SetJITCompilerFlags
// See also code:CordbModule::EnableJITDebugging
HRESULT CordbModule::SetJITCompilerFlags(DWORD dwFlags)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
HRESULT hr = S_OK;
EX_TRY
{
// can't have a subset of these, eg 0x101, so make sure we have an exact match
if ((dwFlags != CORDEBUG_JIT_DEFAULT) &&
(dwFlags != CORDEBUG_JIT_DISABLE_OPTIMIZATION) &&
(dwFlags != CORDEBUG_JIT_ENABLE_ENC))
{
hr = E_INVALIDARG;
}
else
{
BOOL fAllowJitOpts = ((dwFlags & CORDEBUG_JIT_DISABLE_OPTIMIZATION) != CORDEBUG_JIT_DISABLE_OPTIMIZATION);
BOOL fEnableEnC = ((dwFlags & CORDEBUG_JIT_ENABLE_ENC) == CORDEBUG_JIT_ENABLE_ENC);
// Can only change jit flags when module is first loaded and before there's any jitted code.
// This ensures all code in the module is jitted the same way.
hr = EnsureModuleIsInLoadCallback();
if (SUCCEEDED(hr))
{
// DD interface will check if it's a valid time to change the flags.
hr = pProcess->GetDAC()->SetCompilerFlags(GetRuntimeDomainAssembly(), fAllowJitOpts, fEnableEnC);
}
}
}
EX_CATCH_HRESULT(hr);
// emulate v2 hresults
if (GetProcess()->GetShim() != NULL)
{
// Emulate Whidbey error hresults
hr = GetProcess()->GetShim()->FilterSetJitFlagsHresult(hr);
}
return hr;
}
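// Illustrative, hypothetical usage sketch (not part of this file): to get debuggable
// (non-optimized) code, the flags must be set inside the LoadModule callback, before anything
// in the module has been jitted.
//
//     // Inside ICorDebugManagedCallback::LoadModule(pAppDomain, pModule):
//     ICorDebugModule2 *pModule2 = NULL;
//     if (SUCCEEDED(pModule->QueryInterface(IID_ICorDebugModule2, (void **)&pModule2)))
//     {
//         pModule2->SetJITCompilerFlags(CORDEBUG_JIT_DISABLE_OPTIMIZATION);
//         pModule2->Release();
//     }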
// Implementation of ICorDebugModule2::GetJitCompilerFlags
HRESULT CordbModule::GetJITCompilerFlags(DWORD *pdwFlags )
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pdwFlags, DWORD*);
*pdwFlags = CORDEBUG_JIT_DEFAULT;
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
HRESULT hr = S_OK;
EX_TRY
{
BOOL fAllowJitOpts;
BOOL fEnableEnC;
pProcess->GetDAC()->GetCompilerFlags (
GetRuntimeDomainAssembly(),
&fAllowJitOpts,
&fEnableEnC);
if (fEnableEnC)
{
*pdwFlags = CORDEBUG_JIT_ENABLE_ENC;
}
else if (! fAllowJitOpts)
{
*pdwFlags = CORDEBUG_JIT_DISABLE_OPTIMIZATION;
}
}
EX_CATCH_HRESULT(hr);
return hr;
}
HRESULT CordbModule::IsMappedLayout(BOOL *isMapped)
{
PUBLIC_API_ENTRY(this);
VALIDATE_POINTER_TO_OBJECT(isMapped, BOOL*);
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
*isMapped = FALSE;
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
EX_TRY
{
hr = pProcess->GetDAC()->IsModuleMapped(m_vmModule, isMapped);
}
EX_CATCH_HRESULT(hr);
return hr;
}
/* ------------------------------------------------------------------------- *
* CordbCode class
* ------------------------------------------------------------------------- */
//-----------------------------------------------------------------------------
// CordbCode constructor
// Arguments:
// Input:
// pFunction - CordbFunction instance for this function
// encVersion - Edit and Continue version number for this code chunk
// fIsIL - indicates whether the instance is a CordbILCode (as
// opposed to a CordbNativeCode)
// id - This is the hashtable key for CordbCode objects
// - for native code, the code start address
// - for IL code, 0
// - for ReJit IL code, the remote pointer to the ReJitSharedInfo
// Output:
// fields of the CordbCode instance have been initialized
//-----------------------------------------------------------------------------
CordbCode::CordbCode(CordbFunction * pFunction, UINT_PTR id, SIZE_T encVersion, BOOL fIsIL)
: CordbBase(pFunction->GetProcess(), id, enumCordbCode),
m_fIsIL(fIsIL),
m_nVersion(encVersion),
m_rgbCode(NULL),
m_continueCounterLastSync(0),
m_pFunction(pFunction)
{
_ASSERTE(pFunction != NULL);
_ASSERTE(m_nVersion >= CorDB_DEFAULT_ENC_FUNCTION_VERSION);
} // CordbCode::CordbCode
//-----------------------------------------------------------------------------
// Destructor for CordbCode object
//-----------------------------------------------------------------------------
CordbCode::~CordbCode()
{
_ASSERTE(IsNeutered());
}
//-----------------------------------------------------------------------------
// Neutered by CordbFunction
// See CordbBase::Neuter for neuter semantics.
//-----------------------------------------------------------------------------
void CordbCode::Neuter()
{
m_pFunction = NULL;
delete [] m_rgbCode;
m_rgbCode = NULL;
CordbBase::Neuter();
}
//-----------------------------------------------------------------------------
// Public method for IUnknown::QueryInterface.
// Has standard QI semantics.
//-----------------------------------------------------------------------------
HRESULT CordbCode::QueryInterface(REFIID id, void ** pInterface)
{
if (id == IID_ICorDebugCode)
{
*pInterface = static_cast<ICorDebugCode*>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugCode *>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// NOT IMPLEMENTED. Remap sequence points are entirely private to the LS,
// and ICorDebug will dispatch a RemapOpportunity callback to notify the
// debugger instead of letting the debugger query for the points.
//
// Returns: E_NOTIMPL
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetEnCRemapSequencePoints(ULONG32 cMap, ULONG32 * pcMap, ULONG32 offsets[])
{
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32*);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(offsets, ULONG32*, cMap, true, true);
//
// Old EnC interface - deprecated
//
return E_NOTIMPL;
} // CordbCode::GetEnCRemapSequencePoints
//-----------------------------------------------------------------------------
// CordbCode::IsIL
// Public method to determine if this Code object represents IL or native code.
//
// Parameters:
// pbIL - OUT: on return, set to True if IL code, else False.
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::IsIL(BOOL *pbIL)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pbIL, BOOL *);
*pbIL = IsIL();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbCode::GetFunction
// Public method to get the Function object associated with this Code object.
// Function:Code = 1:1 for IL, and 1:n for Native. So there is always a single
// unique Function object to return.
//
// Parameters:
// ppFunction - OUT: returns the Function object for this Code.
//
// Returns:
// S_OK - on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetFunction(ICorDebugFunction **ppFunction)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);
*ppFunction = static_cast<ICorDebugFunction*> (m_pFunction);
m_pFunction->ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbCode::GetSize
// Get the size of the code in bytes. If this is IL code, it will be bytes of IL.
// If this is native code, it will be bytes of native code.
//
// Parameters:
// pcBytes - OUT: on return, set to the size of the code in bytes.
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetSize(ULONG32 *pcBytes)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pcBytes, ULONG32 *);
*pcBytes = GetSize();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbCode::CreateBreakpoint
// public method to create a breakpoint in the code.
//
// Parameters:
// offset - offset in bytes to set the breakpoint at. If this is a Native
// code object (IsIl == false), then units are bytes of native code. If
// this is an IL code object, then units are bytes of IL code.
// ppBreakpoint- out-parameter to hold newly created breakpoint object.
//
// Return value:
// S_OK iff *ppBreakpoint is set. Else some error.
//-----------------------------------------------------------------------------
HRESULT CordbCode::CreateBreakpoint(ULONG32 offset,
ICorDebugFunctionBreakpoint **ppBreakpoint)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppBreakpoint, ICorDebugFunctionBreakpoint **);
HRESULT hr;
ULONG32 size = GetSize();
BOOL offsetIsIl = IsIL();
LOG((LF_CORDB, LL_INFO10000, "CCode::CreateBreakpoint, offset=%d, size=%d, IsIl=%d, this=0x%p\n",
offset, size, offsetIsIl, this));
// Make sure the offset is within range of the method.
// If we're native code, then both offset & total code size are bytes of native code,
// else they're both bytes of IL.
if (offset >= size)
{
return CORDBG_E_UNABLE_TO_SET_BREAKPOINT;
}
CordbFunctionBreakpoint *bp = new (nothrow) CordbFunctionBreakpoint(this, offset, offsetIsIl);
if (bp == NULL)
return E_OUTOFMEMORY;
hr = bp->Activate(TRUE);
if (SUCCEEDED(hr))
{
*ppBreakpoint = static_cast<ICorDebugFunctionBreakpoint*> (bp);
bp->ExternalAddRef();
return S_OK;
}
else
{
delete bp;
return hr;
}
}
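// Illustrative, hypothetical usage sketch (not part of this file): binding a breakpoint at the
// start of a method's IL. pFunction stands for an ICorDebugFunction* the debugger already holds.
//
//     ICorDebugCode *pCode = NULL;
//     if (SUCCEEDED(pFunction->GetILCode(&pCode)))
//     {
//         ICorDebugFunctionBreakpoint *pBreakpoint = NULL;
//         if (SUCCEEDED(pCode->CreateBreakpoint(0 /* IL offset */, &pBreakpoint)))
//         {
//             // The breakpoint is already active (see the Activate(TRUE) call above).
//             pBreakpoint->Release();
//         }
//         pCode->Release();
//     }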
//-----------------------------------------------------------------------------
// CordbCode::GetCode
// Public method to get the code-bytes for this Code object. For an IL-code
// object, this will be bytes of IL. For a native-code object, this will be
// bytes of native opcodes.
// The units of the offsets are the same as the units on the CordbCode object.
// (eg, IL offsets for an IL code object, and native offsets for a native code object)
// This will glue together hot + cold regions into a single blob.
//
// Units are also logical (aka linear) offsets into that single glued-together blob.
// Parameters:
// startOffset - linear offset in Code to start copying from.
// endOffset - linear offset in Code to end copying from. Total bytes copied would be (endOffset - startOffset)
// cBufferAlloc - number of bytes in the buffer supplied by the buffer[] parameter.
// buffer - caller allocated storage to copy bytes into.
// pcBufferSize - required out-parameter, holds number of bytes copied into buffer.
//
// Returns:
// S_OK if copy successful. Else error.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetCode(ULONG32 startOffset,
ULONG32 endOffset,
ULONG32 cBufferAlloc,
BYTE buffer[],
ULONG32 *pcBufferSize)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_ARRAY(buffer, BYTE, cBufferAlloc, true, true);
VALIDATE_POINTER_TO_OBJECT(pcBufferSize, ULONG32 *);
LOG((LF_CORDB,LL_EVERYTHING, "CC::GC: for token:0x%x\n", m_pFunction->GetMetadataToken()));
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
*pcBufferSize = 0;
// Check ranges.
ULONG32 totalSize = GetSize();
if (cBufferAlloc < endOffset - startOffset)
endOffset = startOffset + cBufferAlloc;
if (endOffset > totalSize)
endOffset = totalSize;
if (startOffset > totalSize)
startOffset = totalSize;
// Check the continue counter since WriteMemory bumps it up.
if ((m_rgbCode == NULL) ||
(m_continueCounterLastSync < GetProcess()->m_continueCounter))
{
ReadCodeBytes();
m_continueCounterLastSync = GetProcess()->m_continueCounter;
}
// if we just got the code, we'll have to copy it over
if (*pcBufferSize == 0 && m_rgbCode != NULL)
{
memcpy(buffer,
m_rgbCode+startOffset,
endOffset - startOffset);
*pcBufferSize = endOffset - startOffset;
}
return hr;
} // CordbCode::GetCode
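// Illustrative, hypothetical usage sketch (not part of this file): the common pattern is to
// size the buffer with GetSize and then fetch the whole blob in one call. pCode stands for an
// ICorDebugCode* the debugger already holds.
//
//     ULONG32 cbCode = 0;
//     pCode->GetSize(&cbCode);
//     BYTE *pbCode = new BYTE[cbCode];
//     ULONG32 cbFetched = 0;
//     pCode->GetCode(0, cbCode, cbCode, pbCode, &cbFetched);
//     // cbFetched == cbCode on success; remember to delete [] pbCode when done.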
#include "dbgipcevents.h"
//-----------------------------------------------------------------------------
// CordbCode::GetVersionNumber
// Public method to get the EnC version number of the code.
//
// Parameters:
// nVersion - OUT: on return, set to the version number.
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetVersionNumber( ULONG32 *nVersion)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(nVersion, ULONG32 *);
LOG((LF_CORDB,LL_INFO10000,"R:CC:GVN:Returning 0x%x "
"as version\n",m_nVersion));
*nVersion = (ULONG32)m_nVersion;
#ifndef EnC_SUPPORTED
_ASSERTE(*nVersion == 1);
#endif // EnC_SUPPORTED
return S_OK;
}
// get the CordbFunction instance for this code object
CordbFunction * CordbCode::GetFunction()
{
_ASSERTE(m_pFunction != NULL);
return m_pFunction;
}
/* ------------------------------------------------------------------------- *
* CordbILCode class
* ------------------------------------------------------------------------- */
//-----------------------------------------------------------------------------
// CordbILCode ctor to make IL code.
// Arguments:
// Input:
// pFunction - pointer to the CordbFunction instance for this function
// codeRegionInfo - starting address and size in bytes of IL code blob
// nVersion - EnC version number for this IL code blob
// localVarSigToken - LocalVarSig for this IL blob
// id - the key when using ILCode in a CordbHashTable
// Output:
// fields of this instance of CordbILCode have been initialized
//-----------------------------------------------------------------------------
CordbILCode::CordbILCode(CordbFunction * pFunction,
TargetBuffer codeRegionInfo,
SIZE_T nVersion,
mdSignature localVarSigToken,
UINT_PTR id)
: CordbCode(pFunction, id, nVersion, TRUE),
#ifdef EnC_SUPPORTED
m_fIsOld(FALSE),
#endif
m_codeRegionInfo(codeRegionInfo),
m_localVarSigToken(localVarSigToken)
{
} // CordbILCode::CordbILCode
#ifdef EnC_SUPPORTED
//-----------------------------------------------------------------------------
// CordbILCode::MakeOld
// Internal method to perform any cleanup necessary when a code blob is no longer
// the most current.
//-----------------------------------------------------------------------------
void CordbILCode::MakeOld()
{
m_fIsOld = TRUE;
}
#endif
//-----------------------------------------------------------------------------
// CordbILCode::GetAddress
// Public method to get the Entry address for the code. This is the address
// where the method first starts executing.
//
// Parameters:
// pStart - out-parameter to hold start address.
//
// Returns:
// S_OK if *pStart is properly updated.
//-----------------------------------------------------------------------------
HRESULT CordbILCode::GetAddress(CORDB_ADDRESS * pStart)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pStart, CORDB_ADDRESS *);
_ASSERTE(this != NULL);
_ASSERTE(this->GetFunction() != NULL);
_ASSERTE(this->GetFunction()->GetModule() != NULL);
_ASSERTE(this->GetFunction()->GetModule()->GetProcess() == GetProcess());
*pStart = (m_codeRegionInfo.pAddress);
return S_OK;
} // CordbILCode::GetAddress
//-----------------------------------------------------------------------------
// CordbILCode::ReadCodeBytes
// Reads the actual bytes of IL code into the data member m_rgbCode
// Arguments:
// none (uses data members)
// Return value:
// standard HRESULT values
// also allocates and initializes m_rgbCode
// Notes: assumes that the caller has checked to ensure that m_rgbCode doesn't
// hold valid data
//-----------------------------------------------------------------------------
HRESULT CordbILCode::ReadCodeBytes()
{
HRESULT hr = S_OK;
EX_TRY
{
// We have an address & size, so we'll just call ReadMemory.
// This will conveniently strip out any patches too.
CORDB_ADDRESS pStart = m_codeRegionInfo.pAddress;
ULONG32 cbSize = (ULONG32) m_codeRegionInfo.cbSize;
delete [] m_rgbCode;
m_rgbCode = new BYTE[cbSize]; // throws
SIZE_T cbRead;
hr = GetProcess()->ReadMemory(pStart, cbSize, m_rgbCode, &cbRead);
IfFailThrow(hr);
SIMPLIFYING_ASSUMPTION(cbRead == cbSize);
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbILCode::ReadCodeBytes
//-----------------------------------------------------------------------------
// CordbILCode::GetILToNativeMapping
// Public method (implements ICorDebugCode) to get the IL-->{ Native Start, Native End} mapping.
// Since 1 CordbILCode can map to multiple CordbNativeCode due to generics, we cannot reliably return the
// mapping information in all cases. So we always fail with CORDBG_E_NON_NATIVE_FRAME. The caller should
// call code:CordbNativeCode::GetILToNativeMapping instead.
//
// Parameters:
// cMap - size of incoming map[] array (in elements).
// pcMap - OUT: full size of IL-->Native map (in elements).
// map - caller allocated array to be filled in.
//
// Returns:
// CORDBG_E_NON_NATIVE_FRAME in all cases
//-----------------------------------------------------------------------------
HRESULT CordbILCode::GetILToNativeMapping(ULONG32 cMap,
ULONG32 * pcMap,
COR_DEBUG_IL_TO_NATIVE_MAP map[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(map, COR_DEBUG_IL_TO_NATIVE_MAP *, cMap, true, true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
return CORDBG_E_NON_NATIVE_FRAME;
} // CordbILCode::GetILToNativeMapping
/*
* CordbILCode::GetLocalVarSig
*
* Get the method's local variable metadata signature. This may be cached, but for dynamic modules we'll always
* read it from the metadata. This function also returns the count of local variables in the method.
*
* Parameters:
* pLocalSigParser - OUT: the local variable signature for the method.
* pLocalCount - OUT: the number of locals the method has.
*
* Returns:
* HRESULT for success or failure.
*
*/
HRESULT CordbILCode::GetLocalVarSig(SigParser *pLocalSigParser,
ULONG *pLocalVarCount)
{
INTERNAL_SYNC_API_ENTRY(GetProcess());
CONTRACTL // @dbgtodo exceptions - convert to throws...
{
NOTHROW;
}
CONTRACTL_END;
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
// A function will not have a local var sig if it has no locals!
if (m_localVarSigToken != mdSignatureNil)
{
PCCOR_SIGNATURE localSignature = NULL;
ULONG size = 0;
EX_TRY // @dbgtodo exceptions - push this up
{
GetFunction()->GetModule()->UpdateMetaDataCacheIfNeeded(m_localVarSigToken);
hr = GetFunction()->GetModule()->GetMetaDataImporter()->GetSigFromToken(m_localVarSigToken,
&localSignature,
&size);
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
LOG((LF_CORDB, LL_WARNING, "CICF::GLVS caught hr=0x%x\n", hr));
}
IfFailRet(hr);
LOG((LF_CORDB, LL_INFO100000, "CIC::GLVS creating sig parser sig=0x%x size=0x%x\n", localSignature, size));
SigParser sigParser = SigParser(localSignature, size);
uint32_t data;
IfFailRet(sigParser.GetCallingConvInfo(&data));
_ASSERTE(data == IMAGE_CEE_CS_CALLCONV_LOCAL_SIG);
// Snag the count of locals in the sig.
uint32_t localCount;
IfFailRet(sigParser.GetData(&localCount));
LOG((LF_CORDB, LL_INFO100000, "CIC::GLVS localCount=0x%x\n", localCount));
if (pLocalSigParser != NULL)
{
*pLocalSigParser = sigParser;
}
if (pLocalVarCount != NULL)
{
*pLocalVarCount = localCount;
}
}
else
{
//
// Signature is Nil, so fill in everything with NULLs and zeros
//
if (pLocalSigParser != NULL)
{
*pLocalSigParser = SigParser(NULL, 0);
}
if (pLocalVarCount != NULL)
{
*pLocalVarCount = 0;
}
}
LOG((LF_CORDB, LL_INFO100000, "CIC::GLVS returning hr=0x%x\n", hr));
return hr;
}
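// For reference, the local variable signature parsed above has this layout (ECMA-335
// II.23.2.6, LocalVarSig); shown here only as an informal sketch:
//
//     LOCAL_SIG (0x07)        // calling-convention byte, IMAGE_CEE_CS_CALLCONV_LOCAL_SIG
//     Count                   // compressed unsigned integer: number of locals
//     Type_1 ... Type_Count   // one (possibly pinned/byref-qualified) type per local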
//-----------------------------------------------------------------------------
// CordbILCode::GetLocalVariableType
// Internal method. Return the type of an IL local, specified by 0-based index.
//
// Parameters:
// dwIndex - 0-based index for IL local number.
// pInst - instantiation information if this is a generic function. E.g.,
// if the function is on List<T>, pInst describes T.
// ppResultType - out parameter, yields the CordbType of the local.
//
// Return:
// S_OK on success.
//
HRESULT CordbILCode::GetLocalVariableType(DWORD dwIndex,
const Instantiation * pInst,
CordbType ** ppResultType)
{
ATT_ALLOW_LIVE_DO_STOPGO(GetProcess());
LOG((LF_CORDB, LL_INFO10000, "CIC::GLVT dwIndex=0x%x pInst=0x%p\n", dwIndex, pInst));
HRESULT hr = S_OK;
EX_TRY
{
// Get the local variable signature.
SigParser sigParser;
ULONG cLocals;
IfFailThrow(GetLocalVarSig(&sigParser, &cLocals));
// Check the index.
if (dwIndex >= cLocals)
{
ThrowHR(E_INVALIDARG);
}
// Run the signature and find the required argument.
for (unsigned int i = 0; i < dwIndex; i++)
{
LOG((LF_CORDB, LL_INFO10000, "CIC::GLVT scanning index 0x%x\n", i));
IfFailThrow(sigParser.SkipExactlyOne());
}
hr = CordbType::SigToType(GetFunction()->GetModule(), &sigParser, pInst, ppResultType);
LOG((LF_CORDB, LL_INFO10000, "CIC::GLVT CT::SigToType returned hr=0x%x\n", hr));
IfFailThrow(hr);
} EX_CATCH_HRESULT(hr);
return hr;
}
mdSignature CordbILCode::GetLocalVarSigToken()
{
return m_localVarSigToken;
}
HRESULT CordbILCode::CreateNativeBreakpoint(ICorDebugFunctionBreakpoint **ppBreakpoint)
{
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppBreakpoint, ICorDebugFunctionBreakpoint **);
HRESULT hr;
ULONG32 size = GetSize();
LOG((LF_CORDB, LL_INFO10000, "CordbILCode::CreateNativeBreakpoint, size=%d, this=0x%p\n",
size, this));
ULONG32 offset = 0;
CordbFunctionBreakpoint *bp = new (nothrow) CordbFunctionBreakpoint(this, offset, FALSE);
if (bp == NULL)
{
return E_OUTOFMEMORY;
}
hr = bp->Activate(TRUE);
if (SUCCEEDED(hr))
{
*ppBreakpoint = static_cast<ICorDebugFunctionBreakpoint*> (bp);
bp->ExternalAddRef();
return S_OK;
}
else
{
delete bp;
return hr;
}
}
CordbReJitILCode::CordbReJitILCode(CordbFunction *pFunction, SIZE_T encVersion, VMPTR_ILCodeVersionNode vmILCodeVersionNode) :
CordbILCode(pFunction, TargetBuffer(), encVersion, mdSignatureNil, VmPtrToCookie(vmILCodeVersionNode)),
m_cClauses(0),
m_cbLocalIL(0),
m_cILMap(0)
{
_ASSERTE(!vmILCodeVersionNode.IsNull());
DacSharedReJitInfo data = { 0 };
IfFailThrow(GetProcess()->GetDAC()->GetILCodeVersionNodeData(vmILCodeVersionNode, &data));
IfFailThrow(Init(&data));
}
//-----------------------------------------------------------------------------
// CordbReJitILCode::Init
//
// Returns:
// S_OK if all fields are inited. Else error.
HRESULT CordbReJitILCode::Init(DacSharedReJitInfo* pSharedReJitInfo)
{
HRESULT hr = S_OK;
// Instrumented IL map
if (pSharedReJitInfo->m_cInstrumentedMapEntries)
{
if (pSharedReJitInfo->m_cInstrumentedMapEntries > 100000)
return CORDBG_E_TARGET_INCONSISTENT;
m_cILMap = pSharedReJitInfo->m_cInstrumentedMapEntries;
m_pILMap = new (nothrow)COR_IL_MAP[m_cILMap];
TargetBuffer mapBuffer(pSharedReJitInfo->m_rgInstrumentedMapEntries, m_cILMap*sizeof(COR_IL_MAP));
IfFailRet(GetProcess()->SafeReadBuffer(mapBuffer, (BYTE*)m_pILMap.GetValue(), FALSE /* bThrowOnError */));
}
// Read the method's IL header
CORDB_ADDRESS pIlHeader = pSharedReJitInfo->m_pbIL;
IMAGE_COR_ILMETHOD_FAT header = { 0 };
bool headerMustBeTiny = false;
ULONG32 headerSize = 0;
hr = GetProcess()->SafeReadStruct(pIlHeader, &header);
if (hr != S_OK)
{
// It's possible the header is tiny and there isn't enough memory to read a complete
// FAT header.
headerMustBeTiny = true;
IfFailRet(GetProcess()->SafeReadStruct(pIlHeader, (IMAGE_COR_ILMETHOD_TINY *)&header));
}
// Read the ILCodeSize and LocalVarSigTok from header
ULONG32 ilCodeSize = 0;
IMAGE_COR_ILMETHOD_TINY *pMethodTinyHeader = (IMAGE_COR_ILMETHOD_TINY *)&header;
bool isTinyHeader = ((pMethodTinyHeader->Flags_CodeSize & (CorILMethod_FormatMask >> 1)) == CorILMethod_TinyFormat);
if (isTinyHeader)
{
ilCodeSize = (((unsigned)pMethodTinyHeader->Flags_CodeSize) >> (CorILMethod_FormatShift - 1));
headerSize = sizeof(IMAGE_COR_ILMETHOD_TINY);
m_localVarSigToken = mdSignatureNil;
}
else if (headerMustBeTiny)
{
// header was not CorILMethod_TinyFormat
// this is not possible, must be an error when reading from data target
return CORDBG_E_READVIRTUAL_FAILURE;
}
else
{
ilCodeSize = header.CodeSize;
headerSize = header.Size * 4;
m_localVarSigToken = header.LocalVarSigTok;
}
if (ilCodeSize == 0 || ilCodeSize > 100000)
{
return CORDBG_E_TARGET_INCONSISTENT;
}
m_codeRegionInfo.Init(pIlHeader + headerSize, ilCodeSize);
m_pLocalIL = new (nothrow) BYTE[ilCodeSize];
if (m_pLocalIL == NULL)
return E_OUTOFMEMORY;
m_cbLocalIL = ilCodeSize;
IfFailRet(GetProcess()->SafeReadBuffer(m_codeRegionInfo, m_pLocalIL, FALSE /*throwOnError*/));
// Check if this il code has exception clauses
if ((pMethodTinyHeader->Flags_CodeSize & CorILMethod_MoreSects) == 0)
{
return S_OK; // no EH, done initing
}
// EH section starts at the 4 byte aligned address after the code
CORDB_ADDRESS ehClauseHeader = ((pIlHeader + headerSize + ilCodeSize - 1) & ~3) + 4;
BYTE kind = 0;
IfFailRet(GetProcess()->SafeReadStruct(ehClauseHeader, &kind));
if ((kind & CorILMethod_Sect_KindMask) != CorILMethod_Sect_EHTable)
{
return S_OK;
}
if (kind & CorILMethod_Sect_FatFormat)
{
// Read the section header to see how many clauses there are
IMAGE_COR_ILMETHOD_SECT_FAT sectionHeader = { 0 };
IfFailRet(GetProcess()->SafeReadStruct(ehClauseHeader, &sectionHeader));
m_cClauses = (sectionHeader.DataSize - 4) / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT);
if (m_cClauses > 10000) // sanity check the data before allocating
{
return CORDBG_E_TARGET_INCONSISTENT;
}
// Read in the clauses
TargetBuffer buffer(ehClauseHeader + sizeof(IMAGE_COR_ILMETHOD_SECT_FAT), m_cClauses*sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT));
NewArrayHolder<IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT> pClauses = new (nothrow)IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT[m_cClauses];
if (pClauses == NULL)
return E_OUTOFMEMORY;
IfFailRet(GetProcess()->SafeReadBuffer(buffer, (BYTE*)pClauses.GetValue(), FALSE /*throwOnError*/));
// convert clauses
m_pClauses = new (nothrow)CorDebugEHClause[m_cClauses];
if (m_pClauses == NULL)
return E_OUTOFMEMORY;
for (ULONG32 i = 0; i < m_cClauses; i++)
{
BOOL isFilter = ((pClauses[i].Flags & COR_ILEXCEPTION_CLAUSE_FILTER) != 0);
m_pClauses[i].Flags = pClauses[i].Flags;
m_pClauses[i].TryOffset = pClauses[i].TryOffset;
m_pClauses[i].TryLength = pClauses[i].TryLength;
m_pClauses[i].HandlerOffset = pClauses[i].HandlerOffset;
m_pClauses[i].HandlerLength = pClauses[i].HandlerLength;
// these two fields are a union in the image, but are separate in the struct ICorDebug returns
m_pClauses[i].ClassToken = isFilter ? 0 : pClauses[i].ClassToken;
m_pClauses[i].FilterOffset = isFilter ? pClauses[i].FilterOffset : 0;
}
}
else
{
// Read in the section header to see how many small clauses there are
IMAGE_COR_ILMETHOD_SECT_SMALL sectionHeader = { 0 };
IfFailRet(GetProcess()->SafeReadStruct(ehClauseHeader, &sectionHeader));
m_cClauses = (sectionHeader.DataSize - 4) / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL);
if (m_cClauses > 10000) // sanity check the data before allocating
{
return CORDBG_E_TARGET_INCONSISTENT;
}
// Read in the clauses
TargetBuffer buffer(ehClauseHeader + sizeof(IMAGE_COR_ILMETHOD_SECT_SMALL), m_cClauses*sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL));
NewArrayHolder<IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL> pClauses = new (nothrow)IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL[m_cClauses];
if (pClauses == NULL)
return E_OUTOFMEMORY;
IfFailRet(GetProcess()->SafeReadBuffer(buffer, (BYTE*)pClauses.GetValue(), FALSE /*throwOnError*/));
// convert clauses
m_pClauses = new (nothrow)CorDebugEHClause[m_cClauses];
if (m_pClauses == NULL)
return E_OUTOFMEMORY;
for (ULONG32 i = 0; i < m_cClauses; i++)
{
BOOL isFilter = ((pClauses[i].Flags & COR_ILEXCEPTION_CLAUSE_FILTER) != 0);
m_pClauses[i].Flags = pClauses[i].Flags;
m_pClauses[i].TryOffset = pClauses[i].TryOffset;
m_pClauses[i].TryLength = pClauses[i].TryLength;
m_pClauses[i].HandlerOffset = pClauses[i].HandlerOffset;
m_pClauses[i].HandlerLength = pClauses[i].HandlerLength;
// these two fields are a union in the image, but are separate in the struct ICorDebug returns
m_pClauses[i].ClassToken = isFilter ? 0 : pClauses[i].ClassToken;
m_pClauses[i].FilterOffset = isFilter ? pClauses[i].FilterOffset : 0;
}
}
return S_OK;
}
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
//-----------------------------------------------------------------------------
// CordbReJitILCode::GetEHClauses
// Public method to get the EH clauses for IL code
//
// Parameters:
// cClauses - size of incoming clauses array (in elements).
// pcClauses - OUT param: cClauses>0 -> the number of elements written to in the clauses array.
// cClauses=0 -> the number of EH clauses this IL code has
// clauses - caller allocated storage to hold the EH clauses.
//
// Returns:
// S_OK if successfully copied elements to clauses array.
HRESULT CordbReJitILCode::GetEHClauses(ULONG32 cClauses, ULONG32 * pcClauses, CorDebugEHClause clauses[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcClauses, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(clauses, CorDebugEHClause *, cClauses, true, true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
if (cClauses != 0 && clauses == NULL)
{
return E_INVALIDARG;
}
if (pcClauses != NULL)
{
if (cClauses == 0)
{
*pcClauses = m_cClauses;
}
else
{
*pcClauses = MIN(cClauses, m_cClauses);
}
}
if (clauses != NULL)
{
memcpy_s(clauses, sizeof(CorDebugEHClause)*cClauses, m_pClauses, sizeof(CorDebugEHClause)*MIN(cClauses, m_cClauses));
}
return S_OK;
}
ULONG CordbReJitILCode::AddRef()
{
return CordbCode::AddRef();
}
ULONG CordbReJitILCode::Release()
{
return CordbCode::Release();
}
HRESULT CordbReJitILCode::QueryInterface(REFIID riid, void** ppInterface)
{
if (riid == IID_ICorDebugILCode)
{
*ppInterface = static_cast<ICorDebugILCode*>(this);
}
else if (riid == IID_ICorDebugILCode2)
{
*ppInterface = static_cast<ICorDebugILCode2*>(this);
}
else
{
return CordbILCode::QueryInterface(riid, ppInterface);
}
AddRef();
return S_OK;
}
HRESULT CordbReJitILCode::GetLocalVarSigToken(mdSignature *pmdSig)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pmdSig, mdSignature *);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
*pmdSig = m_localVarSigToken;
return S_OK;
}
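//-----------------------------------------------------------------------------
// CordbReJitILCode::GetInstrumentedILMap
// Public method to get the instrumented IL map entries for this IL code.
// Follows the usual query-count / fill-buffer calling pattern.
//
// Parameters:
//    cMap - size of incoming map array (in elements).
//    pcMap - OUT param: cMap>0 -> the number of elements written to the map array.
//                       cMap=0 -> the number of COR_IL_MAP entries available.
//    map - caller allocated storage to hold the map entries.
//
// Returns:
//    S_OK if successfully copied elements to the map array.
//-----------------------------------------------------------------------------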
HRESULT CordbReJitILCode::GetInstrumentedILMap(ULONG32 cMap, ULONG32 *pcMap, COR_IL_MAP map[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(map, COR_IL_MAP *, cMap, true, true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
if (cMap != 0 && map == NULL)
{
return E_INVALIDARG;
}
if (pcMap != NULL)
{
if (cMap == 0)
{
*pcMap = m_cILMap;
}
else
{
*pcMap = MIN(cMap, m_cILMap);
}
}
if (map != NULL)
{
memcpy_s(map, sizeof(COR_IL_MAP)*cMap, m_pILMap, sizeof(COR_IL_MAP)*MIN(cMap, m_cILMap));
}
return S_OK;
}
// FindNativeInfoInILVariableArray
// Linear search through an array of NativeVarInfos, to find the variable of index dwIndex, valid
// at the given ip. Returns CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't valid at the given ip.
// Arguments:
// input: dwIndex - variable number
// ip - IP
// nativeInfoList - list of instances of NativeVarInfo
// output: ppNativeInfo - the element of nativeInfoList that corresponds to the IP and variable number
// if we find such an element or NULL otherwise
// Return value: HRESULT: returns S_OK or CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't found
//
HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex,
SIZE_T ip,
const DacDbiArrayList<ICorDebugInfo::NativeVarInfo> * nativeInfoList,
const ICorDebugInfo::NativeVarInfo ** ppNativeInfo)
{
_ASSERTE(ppNativeInfo != NULL);
*ppNativeInfo = NULL;
// A few words about this search: it must be linear, and the
// comparison of startOffset and endOffset to ip must be
// <=/>. startOffset points to the first instruction that will
// make the variable's home valid. endOffset points to the first
// instruction at which the variable's home becomes invalid.
int lastGoodOne = -1;
for (unsigned int i = 0; i < (unsigned)nativeInfoList->Count(); i++)
{
if ((*nativeInfoList)[i].varNumber == dwIndex)
{
if ( (lastGoodOne == -1) ||
((*nativeInfoList)[lastGoodOne].startOffset < (*nativeInfoList)[i].startOffset) )
{
lastGoodOne = i;
}
if (((*nativeInfoList)[i].startOffset <= ip) &&
((*nativeInfoList)[i].endOffset > ip))
{
*ppNativeInfo = &((*nativeInfoList)[i]);
return S_OK;
}
}
}
// workaround:
//
// We didn't find the variable. Was the endOffset of the last range for this variable
// equal to the current IP? If so, go ahead and "lie" and report that as the
// variable's home for now.
//
// Rationale:
//
// * See TODO comment in code:Compiler::siUpdate (jit\scopeinfo.cpp). In optimized
// code, the JIT can report var lifetimes as being one instruction too short.
// This workaround makes up for that. Example code:
//
// static void foo(int x)
// {
// int b = x; // Value of "x" would not be reported in optimized code without the workaround
// bar(ref b);
// }
//
// * Since this is the first instruction after the last range a variable was alive,
// we're essentially assuming that since that instruction hasn't been executed
// yet, and since there isn't a new home for the variable, that the last home is
// still good. This actually turns out to be true 99.9% of the time, so we'll go
// with it for now.
// * We've been lying like this since 1999, so surely it's safe.
if ((lastGoodOne > -1) && ((*nativeInfoList)[lastGoodOne].endOffset == ip))
{
*ppNativeInfo = &((*nativeInfoList)[lastGoodOne]);
return S_OK;
}
return CORDBG_E_IL_VAR_NOT_AVAILABLE;
} // FindNativeInfoInILVariableArray
// * ------------------------------------------------------------------------- *
// * Variable Enum class
// * ------------------------------------------------------------------------- *
//-----------------------------------------------------------------------------
// CordbVariableHome constructor
// Arguments:
// Input:
// pCode - CordbNativeCode instance containing this variable home
// pNativeVarInfo - native location, lifetime, and index information for
// this variable
// isLocal - indicates whether the instance is a local variable,
// as opposed to an argument
// index - the argument or slot index
// Output:
// fields of the CordbVariableHome instance have been initialized
//-----------------------------------------------------------------------------
CordbVariableHome::CordbVariableHome(CordbNativeCode *pCode,
const ICorDebugInfo::NativeVarInfo nativeVarInfo,
BOOL isLocal,
ULONG index) :
CordbBase(pCode->GetModule()->GetProcess(), 0)
{
_ASSERTE(pCode != NULL);
m_pCode.Assign(pCode);
m_nativeVarInfo = nativeVarInfo;
m_isLocal = isLocal;
m_index = index;
}
CordbVariableHome::~CordbVariableHome()
{
_ASSERTE(this->IsNeutered());
}
void CordbVariableHome::Neuter()
{
m_pCode.Clear();
CordbBase::Neuter();
}
//-----------------------------------------------------------------------------
// Public method for IUnknown::QueryInterface.
// Has standard QI semantics.
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::QueryInterface(REFIID id, void **pInterface)
{
if (id == IID_ICorDebugVariableHome)
{
*pInterface = static_cast<ICorDebugVariableHome *>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugVariableHome *>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetCode
// Public method to get the Code object containing this variable home.
//
// Parameters:
// ppCode - OUT: returns the Code object for this variable home.
//
// Returns:
// S_OK - on success.
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetCode(ICorDebugCode **ppCode)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppCode, ICorDebugCode **);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
HRESULT hr = m_pCode->QueryInterface(IID_ICorDebugCode, (LPVOID*)ppCode);
return hr;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetSlotIndex
// Public method to get the slot index for this variable home.
//
// Parameters:
// pSlotIndex - OUT: returns the managed slot-index of this variable home.
//
// Returns:
// S_OK - on success
// E_FAIL - if the variable is not a local variable, but an argument
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetSlotIndex(ULONG32 *pSlotIndex)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pSlotIndex, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
if (!m_isLocal)
{
return E_FAIL;
}
*pSlotIndex = m_index;
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetArgumentIndex
// Public method to get the argument index for this variable home.
//
// Parameters:
//    pArgumentIndex - OUT: returns the managed argument-index of this variable home.
//
// Returns:
// S_OK - on success
// E_FAIL - if the variable is not an argument, but a local variable
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetArgumentIndex(ULONG32 *pArgumentIndex)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pArgumentIndex, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
if (m_isLocal)
{
return E_FAIL;
}
*pArgumentIndex = m_index;
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetLiveRange
// Public method to get the native range over which this variable is live.
//
// Parameters:
// pStartOffset - OUT: returns the logical offset at which the variable is
// first live
// pEndOffset - OUT: returns the logical offset immediately after that at
// which the variable is last live
//
// Returns:
// S_OK - on success
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetLiveRange(ULONG32 *pStartOffset,
ULONG32 *pEndOffset)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pStartOffset, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT(pEndOffset, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
*pStartOffset = m_nativeVarInfo.startOffset;
*pEndOffset = m_nativeVarInfo.endOffset;
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetLocationType
// Public method to get the type of native location for this variable home.
//
// Parameters:
// pLocationType - OUT: the type of native location
//
// Returns:
// S_OK - on success
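//
// Usage sketch (illustrative only; "pHome" is a hypothetical ICorDebugVariableHome
// pointer obtained from EnumerateVariableHomes below):
//
//     VariableLocationType locType;
//     IfFailRet(pHome->GetLocationType(&locType));
//     if (locType == VLT_REGISTER)
//     {
//         CorDebugRegister reg;
//         IfFailRet(pHome->GetRegister(&reg));      // value lives in this register
//     }
//     else if (locType == VLT_REGISTER_RELATIVE)
//     {
//         CorDebugRegister baseReg;
//         LONG offset;
//         IfFailRet(pHome->GetRegister(&baseReg));  // base register...
//         IfFailRet(pHome->GetOffset(&offset));     // ...plus this offset
//     }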
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetLocationType(VariableLocationType *pLocationType)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pLocationType, VariableLocationType *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
switch (m_nativeVarInfo.loc.vlType)
{
case ICorDebugInfo::VLT_REG:
*pLocationType = VLT_REGISTER;
break;
case ICorDebugInfo::VLT_STK:
*pLocationType = VLT_REGISTER_RELATIVE;
break;
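// Any other native location encoding is surfaced to callers as VLT_INVALID.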
default:
*pLocationType = VLT_INVALID;
}
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetRegister
// Public method to get the register or base register for this variable home.
//
// Parameters:
// pRegister - OUT: for VLT_REGISTER location types, gives the register.
// for VLT_REGISTER_RELATIVE location types, gives the base
// register.
//
// Returns:
// S_OK - on success
// E_FAIL - for VLT_INVALID location types
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetRegister(CorDebugRegister *pRegister)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pRegister, CorDebugRegister *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
switch (m_nativeVarInfo.loc.vlType)
{
case ICorDebugInfo::VLT_REG:
*pRegister = ConvertRegNumToCorDebugRegister(m_nativeVarInfo.loc.vlReg.vlrReg);
break;
case ICorDebugInfo::VLT_STK:
*pRegister = ConvertRegNumToCorDebugRegister(m_nativeVarInfo.loc.vlStk.vlsBaseReg);
break;
default:
return E_FAIL;
}
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetOffset
// Public method to get the offset from the base register for this variable home.
//
// Parameters:
// pOffset - OUT: gives the offset from the base register
//
// Returns:
// S_OK - on success
// E_FAIL - for location types other than VLT_REGISTER_RELATIVE
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetOffset(LONG *pOffset)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pOffset, LONG *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
switch (m_nativeVarInfo.loc.vlType)
{
case ICorDebugInfo::VLT_STK:
*pOffset = m_nativeVarInfo.loc.vlStk.vlsOffset;
break;
default:
return E_FAIL;
}
return S_OK;
}
// * ------------------------------------------------------------------------- *
// * Native Code class
// * ------------------------------------------------------------------------- */
//-----------------------------------------------------------------------------
// CordbNativeCode ctor to make Native code.
// Arguments:
// Input:
// pFunction - the function for which this is the native code object
// pJitData - the information about this code object retrieved from the DAC
// fIsInstantiatedGeneric - indicates whether this code object is an instantiated
// generic
// Output:
// fields of this instance of CordbNativeCode have been initialized
//-----------------------------------------------------------------------------
CordbNativeCode::CordbNativeCode(CordbFunction * pFunction,
const NativeCodeFunctionData * pJitData,
BOOL fIsInstantiatedGeneric)
: CordbCode(pFunction, (UINT_PTR)pJitData->m_rgCodeRegions[kHot].pAddress, pJitData->encVersion, FALSE),
m_vmNativeCodeMethodDescToken(pJitData->vmNativeCodeMethodDescToken),
m_fCodeAvailable(TRUE),
m_fIsInstantiatedGeneric(fIsInstantiatedGeneric != FALSE)
{
_ASSERTE(GetVersion() >= CorDB_DEFAULT_ENC_FUNCTION_VERSION);
for (CodeBlobRegion region = kHot; region < MAX_REGIONS; ++region)
{
m_rgCodeRegions[region] = pJitData->m_rgCodeRegions[region];
}
} //CordbNativeCode::CordbNativeCode
//-----------------------------------------------------------------------------
// Public method for IUnknown::QueryInterface.
// Has standard QI semantics.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::QueryInterface(REFIID id, void ** pInterface)
{
if (id == IID_ICorDebugCode)
{
*pInterface = static_cast<ICorDebugCode *>(this);
}
else if (id == IID_ICorDebugCode2)
{
*pInterface = static_cast<ICorDebugCode2 *>(this);
}
else if (id == IID_ICorDebugCode3)
{
*pInterface = static_cast<ICorDebugCode3 *>(this);
}
else if (id == IID_ICorDebugCode4)
{
*pInterface = static_cast<ICorDebugCode4 *>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugCode *>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbNativeCode::GetAddress
// Public method to get the Entry address for the code. This is the address
// where the method first starts executing.
//
// Parameters:
// pStart - out-parameter to hold start address.
//
// Returns:
// S_OK if *pStart is properly updated.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetAddress(CORDB_ADDRESS * pStart)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pStart, CORDB_ADDRESS *);
_ASSERTE(this != NULL);
_ASSERTE(this->GetFunction() != NULL);
_ASSERTE(this->GetFunction()->GetModule() != NULL);
_ASSERTE(this->GetFunction()->GetModule()->GetProcess() == GetProcess());
// Since we don't do code-pitching, the address points directly to the code.
*pStart = (m_rgCodeRegions[kHot].pAddress);
if (*pStart == NULL)
{
return CORDBG_E_CODE_NOT_AVAILABLE;
}
return S_OK;
} // CordbNativeCode::GetAddress
//-----------------------------------------------------------------------------
// CordbNativeCode::ReadCodeBytes
// Reads the actual bytes of native code from both the hot and cold regions
// into the data member m_rgbCode
// Arguments:
// none (uses data members)
// Return value:
// standard HRESULT values
// also allocates and initializes m_rgbCode
// Notes: assumes that the caller has checked to ensure that m_rgbCode doesn't
// hold valid data
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::ReadCodeBytes()
{
HRESULT hr = S_OK;
EX_TRY
{
// We have an address & size, so we'll just call ReadMemory.
// This will conveniently strip out any patches too.
CORDB_ADDRESS pHotStart = m_rgCodeRegions[kHot].pAddress;
CORDB_ADDRESS pColdStart = m_rgCodeRegions[kCold].pAddress;
ULONG32 cbHotSize = (ULONG32) m_rgCodeRegions[kHot].cbSize;
ULONG32 cbColdSize = GetColdSize();
delete [] m_rgbCode;
m_rgbCode = new BYTE[cbHotSize + cbColdSize];
SIZE_T cbRead;
hr = GetProcess()->ReadMemory(pHotStart, cbHotSize, m_rgbCode, &cbRead);
IfFailThrow(hr);
SIMPLIFYING_ASSUMPTION(cbRead == cbHotSize);
if (HasColdRegion())
{
hr = GetProcess()->ReadMemory(pColdStart, cbColdSize, (BYTE *) m_rgbCode + cbHotSize, &cbRead);
IfFailThrow(hr);
SIMPLIFYING_ASSUMPTION(cbRead == cbColdSize);
}
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbNativeCode::ReadCodeBytes
//-----------------------------------------------------------------------------
// CordbNativeCode::GetColdSize
// Get the size of the cold regions in bytes.
//
// Parameters:
// none--uses data member m_rgCodeRegions to compute total size.
//
// Returns:
// the size of the code in bytes.
//-----------------------------------------------------------------------------
ULONG32 CordbNativeCode::GetColdSize()
{
ULONG32 pcBytes = 0;
for (CodeBlobRegion index = kCold; index < MAX_REGIONS; ++index)
{
pcBytes += m_rgCodeRegions[index].cbSize;
}
return pcBytes;
} // CordbNativeCode::GetColdSize
//-----------------------------------------------------------------------------
// CordbNativeCode::GetSize
// Get the size of the code in bytes.
//
// Parameters:
// none--uses data member m_rgCodeRegions to compute total size.
//
// Returns:
// the size of the code in bytes.
//-----------------------------------------------------------------------------
ULONG32 CordbNativeCode::GetSize()
{
ULONG32 pcBytes = 0;
for (CodeBlobRegion index = kHot; index < MAX_REGIONS; ++index)
{
pcBytes += m_rgCodeRegions[index].cbSize;
}
return pcBytes;
} // CordbNativeCode::GetSize
//-----------------------------------------------------------------------------
// CordbNativeCode::GetILToNativeMapping
// Public method (implements ICorDebugCode) to get the IL-->{ Native Start, Native End} mapping.
// This can only be retrieved for native code.
// This will copy as much of the map as can fit in the incoming buffer.
//
// Parameters:
// cMap - size of incoming map[] array (in elements).
// pcMap - OUT: full size of IL-->Native map (in elements).
// map - caller allocated array to be filled in.
//
// Returns:
// S_OK on successful copying.
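//
// Usage sketch (illustrative only; "pCode" is a hypothetical ICorDebugCode
// pointer held by the debugger):
//
//     ULONG32 cEntries = 0;
//     IfFailRet(pCode->GetILToNativeMapping(0, &cEntries, NULL));   // query the count
//     NewArrayHolder<COR_DEBUG_IL_TO_NATIVE_MAP> pMap(
//         new (nothrow) COR_DEBUG_IL_TO_NATIVE_MAP[cEntries]);
//     if (pMap == NULL)
//         return E_OUTOFMEMORY;
//     IfFailRet(pCode->GetILToNativeMapping(cEntries, &cEntries, pMap));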
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetILToNativeMapping(ULONG32 cMap,
ULONG32 * pcMap,
COR_DEBUG_IL_TO_NATIVE_MAP map[])
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(map, COR_DEBUG_IL_TO_NATIVE_MAP *,cMap,true,true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
EX_TRY
{
LoadNativeInfo();
SequencePoints * pSeqPts = GetSequencePoints();
DebuggerILToNativeMap * rgMapInt = pSeqPts->GetMapAddr();
ULONG32 cMapIntCount = pSeqPts->GetEntryCount();
// If they gave us space to copy into...
if (map != NULL)
{
// Only copy as much as either they gave us or we have to copy.
ULONG32 cMapToCopy = min(cMap, cMapIntCount);
// Remember that we need to translate between our internal DebuggerILToNativeMap and the external
// COR_DEBUG_IL_TO_NATIVE_MAP!
ULONG32 size = GetSize();
ExportILToNativeMap(cMapToCopy, map, rgMapInt, size);
}
// return the full count of map entries
if (pcMap)
{
*pcMap = cMapIntCount;
}
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbNativeCode::GetILToNativeMapping
//-----------------------------------------------------------------------------
// CordbNativeCode::GetCodeChunks
// Public method to get the code regions of code. If the code
// is broken into discontinuous regions (hot + cold), this lets a debugger
// find the number of regions, and (start,size) of each.
//
// Parameters:
// cbufSize - size of incoming chunks array (in elements).
//    pcnumChunks - OUT param: the number of elements written to in the chunks array.
// chunks - caller allocated storage to hold the code chunks.
//
// Returns:
// S_OK if successfully copied elements to Chunk array.
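//
// Usage sketch (illustrative only; "pCode2" is a hypothetical ICorDebugCode2
// pointer held by the debugger):
//
//     ULONG32 cChunks = 0;
//     IfFailRet(pCode2->GetCodeChunks(0, &cChunks, NULL));  // query the chunk count
//     CodeChunkInfo chunks[2];                              // at most hot + cold today
//     IfFailRet(pCode2->GetCodeChunks(ARRAY_SIZE(chunks), &cChunks, chunks));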
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetCodeChunks(
ULONG32 cbufSize,
ULONG32 * pcnumChunks,
CodeChunkInfo chunks[]
)
{
PUBLIC_API_ENTRY(this);
if (pcnumChunks == NULL)
{
return E_INVALIDARG;
}
if ((chunks == NULL) != (cbufSize == 0))
{
return E_INVALIDARG;
}
// Current V2.0 implementation has at most 2 possible chunks right now (1 hot, and 1 cold).
ULONG32 cActualChunks = HasColdRegion() ? 2 : 1;
// If no buf size, then we're querying the total number of chunks.
if (cbufSize == 0)
{
*pcnumChunks = cActualChunks;
return S_OK;
}
// Else give them as many as they asked for.
for (CodeBlobRegion index = kHot; (index < MAX_REGIONS) && ((int)cbufSize > index); ++index)
{
// Fill in the region information
chunks[index].startAddr = m_rgCodeRegions[index].pAddress;
chunks[index].length = (ULONG32) (m_rgCodeRegions[index].cbSize);
*pcnumChunks = cbufSize;
}
return S_OK;
} // CordbNativeCode::GetCodeChunks
//-----------------------------------------------------------------------------
// CordbNativeCode::GetCompilerFlags
// Public entry point to get code flags for this Code object.
// Originally, ICDCode had this method implemented independently from the
// ICDModule method GetJitCompilerFlags. This was because it was considered that
// the flags would be per function, rather than per module.
// In addition, GetCompilerFlags did two different things depending on whether
// the code had a native image. It turned out that was the wrong thing to do.
//
// Parameters:
// pdwFlags - OUT: code gen flags (see CorDebugJITCompilerFlags)
//
// Return value:
// S_OK if pdwFlags is set properly.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetCompilerFlags(DWORD * pdwFlags)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pdwFlags, DWORD *);
*pdwFlags = 0;
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
return GetFunction()->GetModule()->GetJITCompilerFlags(pdwFlags);
} // CordbNativeCode::GetCompilerFlags
//-----------------------------------------------------------------------------
// Given an IL local variable number and a native IP offset, return the
// location of the variable in jitted code.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::ILVariableToNative(DWORD dwIndex,
SIZE_T ip,
const ICorDebugInfo::NativeVarInfo ** ppNativeInfo)
{
_ASSERTE(m_nativeVarData.IsInitialized());
return FindNativeInfoInILVariableArray(dwIndex,
ip,
m_nativeVarData.GetOffsetInfoList(),
ppNativeInfo);
} // CordbNativeCode::ILVariableToNative
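//-----------------------------------------------------------------------------
// CordbNativeCode::GetReturnValueLiveOffset
// Public method to find the native offset(s) at which the return value of the
// call at the given IL offset becomes available (i.e. just past the generated
// call instruction(s)). See GetReturnValueLiveOffsetImpl for the details.
//
// Parameters:
//    ILoffset - IL offset of a call/callvirt instruction in this method.
//    bufferSize - size of the incoming pOffsets array (in elements).
//    pFetched - OUT: number of offsets written (or available, if pOffsets is NULL).
//    pOffsets - caller allocated storage to hold the native offsets.
//
// Returns:
//    S_OK on success; S_FALSE if more offsets were found than fit in the buffer.
//-----------------------------------------------------------------------------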
HRESULT CordbNativeCode::GetReturnValueLiveOffset(ULONG32 ILoffset, ULONG32 bufferSize, ULONG32 *pFetched, ULONG32 *pOffsets)
{
HRESULT hr = S_OK;
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pFetched, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
EX_TRY
{
hr = GetReturnValueLiveOffsetImpl(NULL, ILoffset, bufferSize, pFetched, pOffsets);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//-----------------------------------------------------------------------------
// CordbNativeCode::EnumerateVariableHomes
// Public method to get an enumeration of native variable homes. This may
// include multiple ICorDebugVariableHomes for the same slot or argument index
// if they have different homes at different points in the function.
//
// Parameters:
// ppEnum - OUT: returns the enum of variable homes.
//
// Returns:
// HRESULT for success or failure.
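//
// Usage sketch (illustrative only; "pCode4" is a hypothetical ICorDebugCode4
// pointer held by the debugger):
//
//     ICorDebugVariableHomeEnum * pVarHomeEnum = NULL;
//     IfFailRet(pCode4->EnumerateVariableHomes(&pVarHomeEnum));
//     ICorDebugVariableHome * rgHomes[16];
//     ULONG cFetched = 0;
//     while (SUCCEEDED(pVarHomeEnum->Next(ARRAY_SIZE(rgHomes), rgHomes, &cFetched)) && (cFetched > 0))
//     {
//         // ... inspect each home (see CordbVariableHome above), then Release() it ...
//     }
//     pVarHomeEnum->Release();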
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::EnumerateVariableHomes(ICorDebugVariableHomeEnum **ppEnum)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppEnum, ICorDebugVariableHomeEnum **);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
// Get the argument count
ULONG argCount = 0;
CordbFunction *func = GetFunction();
_ASSERTE(func != NULL);
IfFailRet(func->GetSig(NULL, &argCount, NULL));
#ifdef _DEBUG
// Get the number of locals
ULONG localCount = 0;
EX_TRY
{
GetFunction()->GetILCode()->GetLocalVarSig(NULL, &localCount);
}
EX_CATCH_HRESULT(hr);
IfFailRet(hr);
#endif
RSSmartPtr<CordbVariableHome> *rsHomes = NULL;
EX_TRY
{
CordbProcess *pProcess = GetProcess();
_ASSERTE(pProcess != NULL);
const DacDbiArrayList<ICorDebugInfo::NativeVarInfo> *pOffsetInfoList = m_nativeVarData.GetOffsetInfoList();
_ASSERTE(pOffsetInfoList != NULL);
DWORD countHomes = 0;
for (unsigned int i = 0; i < pOffsetInfoList->Count(); i++)
{
const ICorDebugInfo::NativeVarInfo *pNativeVarInfo = &((*pOffsetInfoList)[i]);
_ASSERTE(pNativeVarInfo != NULL);
// The variable information list can include variables
// with special varNumbers representing, for instance, the
// parameter types for generic methods. Here we are only
// interested in local variables and arguments.
if (pNativeVarInfo->varNumber < (DWORD)ICorDebugInfo::MAX_ILNUM)
{
countHomes++;
}
}
rsHomes = new RSSmartPtr<CordbVariableHome>[countHomes];
DWORD varHomeInd = 0;
for (unsigned int i = 0; i < pOffsetInfoList->Count(); i++)
{
const ICorDebugInfo::NativeVarInfo *pNativeVarInfo = &((*pOffsetInfoList)[i]);
// Again, only look for native var info representing local
// variables and arguments.
if (pNativeVarInfo->varNumber < (DWORD)ICorDebugInfo::MAX_ILNUM)
{
// determine whether this variable home represents an argument or a local variable
BOOL isLocal = ((ULONG)pNativeVarInfo->varNumber >= argCount);
// determine the argument-index or slot-index of this variable home
ULONG argOrSlotIndex;
if (isLocal) {
argOrSlotIndex = pNativeVarInfo->varNumber - argCount;
_ASSERTE(argOrSlotIndex < localCount);
} else {
argOrSlotIndex = pNativeVarInfo->varNumber;
}
RSInitHolder<CordbVariableHome> pCVH(new CordbVariableHome(this,
(*pOffsetInfoList)[i],
isLocal,
argOrSlotIndex));
pProcess->GetContinueNeuterList()->Add(pProcess, pCVH);
_ASSERTE(varHomeInd < countHomes);
rsHomes[varHomeInd].Assign(pCVH);
pCVH.ClearAndMarkDontNeuter();
varHomeInd++;
}
}
RSInitHolder<CordbVariableHomeEnumerator> pCDVHE(
new CordbVariableHomeEnumerator(GetProcess(), &rsHomes, countHomes));
pProcess->GetContinueNeuterList()->Add(pProcess, pCDVHE);
pCDVHE.TransferOwnershipExternal(ppEnum);
}
EX_CATCH_HRESULT(hr);
return hr;
}
int CordbNativeCode::GetCallInstructionLength(BYTE *ip, ULONG32 count)
{
#if defined(TARGET_ARM)
if (Is32BitInstruction(*(WORD*)ip))
return 4;
else
return 2;
#elif defined(TARGET_ARM64)
return MAX_INSTRUCTION_LENGTH;
#elif defined(TARGET_X86)
if (count < 2)
return -1;
// Skip instruction prefixes
do
{
switch (*ip)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf1:
case 0xf2: // REPNE/REPNZ
case 0xf3:
ip++;
count--;
continue;
default:
break;
}
} while (0);
// Read the opcode
BYTE opcode = *ip++;
if (opcode == 0xcc)
{
// todo: Can we actually get this result? Doesn't ICorDebug hand out un-patched assembly?
_ASSERTE(!"Hit break opcode!");
return -1;
}
// Analyze what we can of the opcode
switch (opcode)
{
case 0xff:
{
// Count may have been decremented by prefixes.
if (count < 2)
return -1;
BYTE modrm = *ip++;
BYTE mod = (modrm & 0xC0) >> 6;
BYTE reg = (modrm & 0x38) >> 3;
BYTE rm = (modrm & 0x07);
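// For the 0xFF opcode group, the reg field of the ModRM byte selects the form:
// /2 and /3 are indirect CALLs, /4 and /5 are indirect JMPs. mod/rm (plus an
// optional SIB byte) describe the addressing mode, which determines the total
// instruction length computed below.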
int displace = -1;
if ((reg != 2) && (reg != 3) && (reg != 4) && (reg != 5))
{
//
// This is not a CALL or JMP instruction, return, unknown.
//
_ASSERTE(!"Unhandled opcode!");
return -1;
}
// Only try to decode registers if we actually have reg sets.
switch (mod)
{
case 0:
case 1:
case 2:
if (rm == 4)
{
if (count < 3)
return -1;
//
// Get values from the SIB byte
//
BYTE ss = (*ip & 0xC0) >> 6;
BYTE index = (*ip & 0x38) >> 3;
BYTE base = (*ip & 0x7);
//
// Finally add in the offset
//
if (mod == 0)
{
if (base == 5)
displace = 7;
else
displace = 3;
}
else if (mod == 1)
{
displace = 4;
}
else
{
displace = 7;
}
}
else
{
if (mod == 0)
{
if (rm == 5)
displace = 6;
else
displace = 2;
}
else if (mod == 1)
{
displace = 3;
}
else
{
displace = 6;
}
}
break;
case 3:
default:
displace = 2;
break;
}
return displace;
} // end of 0xFF case
case 0xe8:
return 5;
default:
break;
}
_ASSERTE(!"Unhandled opcode!");
return -1;
#elif defined(TARGET_AMD64)
BYTE rex = NULL;
BYTE prefix = *ip;
BOOL fContainsPrefix = FALSE;
// Should not happen.
if (prefix == 0xcc)
return -1;
// Skip instruction prefixes
//@TODO by euzem:
//This "loop" can't be really executed more than once so if CALL can really have more than one prefix we'll crash.
//Some of these prefixes are not allowed for CALL instruction and we should treat them as invalid code.
//It appears that this code was mostly copy/pasted from \NDP\clr\src\Debug\EE\amd64\amd64walker.cpp
//with very minimum fixes.
do
{
switch (prefix)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf2: // REPNE/REPNZ
case 0xf3:
ip++;
fContainsPrefix = TRUE;
continue;
// REX register extension prefixes
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x44:
case 0x45:
case 0x46:
case 0x47:
case 0x48:
case 0x49:
case 0x4a:
case 0x4b:
case 0x4c:
case 0x4d:
case 0x4e:
case 0x4f:
// make sure to set rex to prefix, not *ip because *ip still represents the
// codestream which has a 0xcc in it.
rex = prefix;
ip++;
fContainsPrefix = TRUE;
continue;
default:
break;
}
} while (0);
// Read the opcode
BYTE opcode = *ip++;
// Should not happen.
if (opcode == 0xcc)
return -1;
// Setup rex bits if needed
BYTE rex_b = 0;
BYTE rex_x = 0;
BYTE rex_r = 0;
if (rex != NULL)
{
rex_b = (rex & 0x1); // high bit to modrm r/m field or SIB base field or OPCODE reg field -- Hmm, when which?
rex_x = (rex & 0x2) >> 1; // high bit to sib index field
rex_r = (rex & 0x4) >> 2; // high bit to modrm reg field
}
// Analyze what we can of the opcode
switch (opcode)
{
case 0xff:
{
BYTE modrm = *ip++;
_ASSERT(modrm != NULL);
BYTE mod = (modrm & 0xC0) >> 6;
BYTE reg = (modrm & 0x38) >> 3;
BYTE rm = (modrm & 0x07);
reg |= (rex_r << 3);
rm |= (rex_b << 3);
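// With the REX.R/REX.B bits folded in, reg and rm can each range from 0-15.
// For the 0xFF opcode group the reg field is an opcode extension selecting the
// form: /2 and /3 are indirect CALLs, /4 and /5 are indirect JMPs.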
if ((reg < 2) || (reg > 5 && reg < 8) || (reg > 15)) {
// not a valid register for a CALL or BRANCH
_ASSERTE(!"Invalid opcode!");
return -1;
}
SHORT displace = -1;
// See: Tables A-15,16,17 in AMD Dev Manual 3 for information
// about how the ModRM/SIB/REX bytes interact.
switch (mod)
{
case 0:
case 1:
case 2:
if ((rm & 0x07) == 4) // we have an SIB byte following
{
//
// Get values from the SIB byte
//
BYTE sib = *ip;
_ASSERT(sib != NULL);
BYTE base = (sib & 0x07);
base |= (rex_b << 3);
ip++;
//
// Finally add in the offset
//
if (mod == 0)
{
if ((base & 0x07) == 5)
displace = 7;
else
displace = 3;
}
else if (mod == 1)
{
displace = 4;
}
else // mod == 2
{
displace = 7;
}
}
else
{
//
// Get the value we need from the register.
//
// Check for RIP-relative addressing mode.
if ((mod == 0) && ((rm & 0x07) == 5))
{
displace = 6; // 1 byte opcode + 1 byte modrm + 4 byte displacement (signed)
}
else
{
if (mod == 0)
displace = 2;
else if (mod == 1)
displace = 3;
else // mod == 2
displace = 6;
}
}
break;
case 3:
default:
displace = 2;
}
// Displace should be set by one of the cases above
if (displace == -1)
{
_ASSERTE(!"GetCallInstructionLength() encountered unexpected call instruction");
return -1;
}
// Account for the 1 byte prefix (REX or otherwise)
if (fContainsPrefix)
displace++;
// reg == 4 or 5 means that it is not a CALL, but JMP instruction
// so we will fall back to ASSERT after break
if ((reg != 4) && (reg != 5))
return displace;
break;
}
case 0xe8:
{
//Near call with the target specified by a 32-bit relative displacement.
//[maybe 1 byte prefix] + [1 byte opcode E8h] + [4 bytes offset]
return 5 + (fContainsPrefix ? 1 : 0);
}
default:
break;
}
_ASSERTE(!"Invalid opcode!");
return -1;
#else
#error Platform not implemented
#endif
}
HRESULT CordbNativeCode::GetSigParserFromFunction(mdToken mdFunction, mdToken *pClass, SigParser &parser, SigParser &methodGenerics)
{
// mdFunction may be a MemberRef, a MethodDef, or a MethodSpec. We must handle all three cases.
HRESULT hr = S_OK;
IMetaDataImport* pImport = m_pFunction->GetModule()->GetMetaDataImporter();
RSExtSmartPtr<IMetaDataImport2> pImport2;
IfFailRet(pImport->QueryInterface(IID_IMetaDataImport2, (void**)&pImport2));
if (TypeFromToken(mdFunction) == mdtMemberRef)
{
PCCOR_SIGNATURE sig = 0;
ULONG sigSize = 0;
IfFailRet(pImport->GetMemberRefProps(mdFunction, pClass, NULL, 0, 0, &sig, &sigSize));
parser = SigParser(sig, sigSize);
}
else if (TypeFromToken(mdFunction) == mdtMethodDef)
{
PCCOR_SIGNATURE sig = 0;
ULONG sigSize = 0;
IfFailRet(pImport->GetMethodProps(mdFunction, pClass, NULL, 0, NULL, NULL, &sig, &sigSize, NULL, NULL));
parser = SigParser(sig, sigSize);
}
else if (TypeFromToken(mdFunction) == mdtMethodSpec)
{
// For a method spec, we use GetMethodSpecProps to get the generic signature and the parent token
// (which is a MethodDef token). We'll recurse to get the other properties from the parent token.
PCCOR_SIGNATURE sig = 0;
ULONG sigSize = 0;
mdToken parentToken = 0;
IfFailRet(pImport2->GetMethodSpecProps(mdFunction, &parentToken, &sig, &sigSize));
methodGenerics = SigParser(sig, sigSize);
if (pClass)
*pClass = parentToken;
return GetSigParserFromFunction(parentToken, pClass, parser, methodGenerics);
}
else
{
// According to ECMA III.3.19, this can never happen.
return E_UNEXPECTED;
}
return S_OK;
}
HRESULT CordbNativeCode::EnsureReturnValueAllowed(Instantiation *currentInstantiation, mdToken targetClass, SigParser &parser, SigParser &methodGenerics)
{
HRESULT hr = S_OK;
uint32_t genCount = 0;
IfFailRet(SkipToReturn(parser, &genCount));
return EnsureReturnValueAllowedWorker(currentInstantiation, targetClass, parser, methodGenerics, genCount);
}
HRESULT CordbNativeCode::EnsureReturnValueAllowedWorker(Instantiation *currentInstantiation, mdToken targetClass, SigParser &parser, SigParser &methodGenerics, ULONG genCount)
{
// There are a few considerations here:
// 1. Generic instantiations. This is a "Foo<T>", and we need to check if that "Foo"
// fits one of the categories we disallow (such as a struct).
// 2. Void return.
// 3. ValueType - Unsupported this release.
// 4. MVAR - Method generics. We need to get the actual generic type and recursively
// check if we allow that.
// 5. VAR - Class generics. We need to get the actual generic type and recurse.
SigParser original(parser);
HRESULT hr = S_OK;
CorElementType returnType;
IfFailRet(parser.GetElemType(&returnType));
if (returnType == ELEMENT_TYPE_GENERICINST)
{
IfFailRet(parser.GetElemType(&returnType));
if (returnType == ELEMENT_TYPE_CLASS)
return S_OK;
if (returnType != ELEMENT_TYPE_VALUETYPE)
return META_E_BAD_SIGNATURE;
if (currentInstantiation == NULL)
return S_OK; // We will check again when we have the instantiation.
NewArrayHolder<CordbType*> types;
Instantiation inst;
IfFailRet(CordbJITILFrame::BuildInstantiationForCallsite(GetModule(), types, inst, currentInstantiation, targetClass, SigParser(methodGenerics)));
CordbType *pType = 0;
IfFailRet(CordbType::SigToType(GetModule(), &original, &inst, &pType));
IfFailRet(pType->ReturnedByValue());
if (hr == S_OK) // not S_FALSE
return S_OK;
return CORDBG_E_UNSUPPORTED;
}
if (returnType == ELEMENT_TYPE_VALUETYPE)
{
Instantiation inst;
CordbType *pType = 0;
IfFailRet(CordbType::SigToType(GetModule(), &original, &inst, &pType));
IfFailRet(pType->ReturnedByValue());
if (hr == S_OK) // not S_FALSE
return S_OK;
return CORDBG_E_UNSUPPORTED;
}
if (returnType == ELEMENT_TYPE_TYPEDBYREF)
return CORDBG_E_UNSUPPORTED;
if (returnType == ELEMENT_TYPE_VOID)
return E_UNEXPECTED;
if (returnType == ELEMENT_TYPE_MVAR)
{
// Get which generic parameter is referenced.
uint32_t genParam = 0;
IfFailRet(parser.GetData(&genParam));
// Grab the calling convention of the method, ensure it's GENERICINST.
uint32_t callingConv = 0;
IfFailRet(methodGenerics.GetCallingConvInfo(&callingConv));
if (callingConv != IMAGE_CEE_CS_CALLCONV_GENERICINST)
return META_E_BAD_SIGNATURE;
// Ensure sensible bounds.
SigParser generics(methodGenerics); // Make a copy since operations are destructive.
uint32_t maxCount = 0;
IfFailRet(generics.GetData(&maxCount));
if (maxCount <= genParam || genParam > 1024)
return META_E_BAD_SIGNATURE;
// Walk to the parameter referenced.
while (genParam--)
IfFailRet(generics.SkipExactlyOne());
// Now recurse with "generics" at the location to continue parsing.
return EnsureReturnValueAllowedWorker(currentInstantiation, targetClass, generics, methodGenerics, genCount);
}
if (returnType == ELEMENT_TYPE_VAR)
{
// Get which type parameter is referenced.
uint32_t typeParam = 0;
parser.GetData(&typeParam);
// Ensure something reasonable.
if (typeParam > 1024)
return META_E_BAD_SIGNATURE;
// Lookup the containing class's signature so we can get the referenced generic parameter.
IMetaDataImport *pImport = m_pFunction->GetModule()->GetMetaDataImporter();
PCCOR_SIGNATURE sig;
ULONG countSig;
IfFailRet(pImport->GetTypeSpecFromToken(targetClass, &sig, &countSig));
// Ensure the type's typespec is GENERICINST.
SigParser typeParser(sig, countSig);
CorElementType et;
IfFailRet(typeParser.GetElemType(&et));
if (et != ELEMENT_TYPE_GENERICINST)
return META_E_BAD_SIGNATURE;
// Move to the correct location.
IfFailRet(typeParser.GetElemType(&et));
if (et != ELEMENT_TYPE_VALUETYPE && et != ELEMENT_TYPE_CLASS)
return META_E_BAD_SIGNATURE;
IfFailRet(typeParser.GetToken(NULL));
uint32_t totalTypeCount = 0;
IfFailRet(typeParser.GetData(&totalTypeCount));
if (totalTypeCount < typeParam)
return META_E_BAD_SIGNATURE;
while (typeParam--)
IfFailRet(typeParser.SkipExactlyOne());
// This is a temporary workaround for an infinite recursion here. ALL of this code will
// go away when we allow struct return values, but in the mean time this avoids a corner
// case in the type system we haven't solved yet.
IfFailRet(typeParser.PeekElemType(&et));
if (et == ELEMENT_TYPE_VAR)
return E_FAIL;
// Now that typeParser is at the location of the correct generic parameter, recurse.
return EnsureReturnValueAllowedWorker(currentInstantiation, targetClass, typeParser, methodGenerics, genCount);
}
// Everything else is supported
return S_OK;
}
HRESULT CordbNativeCode::SkipToReturn(SigParser &parser, uint32_t *genCount)
{
// Takes a method signature parser (at the beginning of a signature) and skips to the
// return value.
HRESULT hr = S_OK;
// Skip calling convention
uint32_t uCallConv;
IfFailRet(parser.GetCallingConvInfo(&uCallConv));
if ((uCallConv == IMAGE_CEE_CS_CALLCONV_FIELD) || (uCallConv == IMAGE_CEE_CS_CALLCONV_LOCAL_SIG))
return META_E_BAD_SIGNATURE;
// Skip type parameter count if function is generic
if (uCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
IfFailRet(parser.GetData(genCount));
// Skip argument count
IfFailRet(parser.GetData(NULL));
return S_OK;
}
HRESULT CordbNativeCode::GetCallSignature(ULONG32 ILoffset, mdToken *pClass, mdToken *pFunction, SigParser &parser, SigParser &generics)
{
// check if specified IL offset is at a call instruction
CordbILCode *pCode = this->m_pFunction->GetILCode();
BYTE buffer[3];
ULONG32 fetched = 0;
HRESULT hr = pCode->GetCode(ILoffset, ILoffset+ARRAY_SIZE(buffer), ARRAY_SIZE(buffer), buffer, &fetched);
if (FAILED(hr))
return hr;
else if (fetched != ARRAY_SIZE(buffer))
return CORDBG_E_INVALID_OPCODE;
// tail. - fe 14 (ECMA III.2.4)
BYTE instruction = buffer[0];
if (buffer[0] == 0xfe && buffer[1] == 0x14)
{
// tail call case. We don't allow managed return values for tailcalls.
return CORDBG_E_INVALID_OPCODE;
}
// call - 28 (ECMA III.3.19)
// callvirt - 6f (ECMA III.4.2)
if (instruction != 0x28 && instruction != 0x6f)
return CORDBG_E_INVALID_OPCODE;
// Now grab the MD token of the call
mdToken mdFunction = 0;
const ULONG32 offset = ILoffset + 1;
hr = pCode->GetCode(offset, offset+sizeof(mdToken), sizeof(mdToken), (BYTE*)&mdFunction, &fetched);
if (FAILED(hr) || fetched != sizeof(mdToken))
return CORDBG_E_INVALID_OPCODE;
if (pFunction)
*pFunction = mdFunction;
// Convert to a signature parser
return GetSigParserFromFunction(mdFunction, pClass, parser, generics);
}
HRESULT CordbNativeCode::GetReturnValueLiveOffsetImpl(Instantiation *currentInstantiation, ULONG32 ILoffset, ULONG32 bufferSize, ULONG32 *pFetched, ULONG32 *pOffsets)
{
if (pFetched == NULL)
return E_INVALIDARG;
HRESULT hr = S_OK;
ULONG32 found = 0;
// verify that the call target actually returns something we allow
SigParser signature, generics;
mdToken mdClass = 0;
IfFailRet(GetCallSignature(ILoffset, &mdClass, NULL, signature, generics));
IfFailRet(EnsureReturnValueAllowed(currentInstantiation, mdClass, signature, generics));
// now find the native offset
SequencePoints *pSP = GetSequencePoints();
DebuggerILToNativeMap *pMap = pSP->GetCallsiteMapAddr();
for (ULONG32 i = 0; i < pSP->GetCallsiteEntryCount() && pMap; ++i, pMap++)
{
if (pMap->ilOffset == ILoffset && (pMap->source & ICorDebugInfo::CALL_INSTRUCTION) == ICorDebugInfo::CALL_INSTRUCTION)
{
// if we have a buffer, fill it in.
if (pOffsets && found < bufferSize)
{
// Fetch the actual assembly instructions
BYTE nativeBuffer[8];
ULONG32 fetched = 0;
IfFailRet(GetCode(pMap->nativeStartOffset, pMap->nativeStartOffset+ARRAY_SIZE(nativeBuffer), ARRAY_SIZE(nativeBuffer), nativeBuffer, &fetched));
int skipBytes = 0;
#if defined(PSEUDORANDOM_NOP_INSERTION)
// Skip nop sleds the JIT adds. The JIT inserts these instructions as a security
// measure, and as a result it reports the wrong offset for the call instruction.
const BYTE nop_opcode = 0x90;
while (fetched && nativeBuffer[0] == nop_opcode)
{
skipBytes++;
for (int j = 1; j < ARRAY_SIZE(nativeBuffer) && nativeBuffer[j] == nop_opcode; ++j)
skipBytes++;
// We must have at least one skip byte since the outer while ensures it. Thus we always need to reread
// the buffer at the end of this loop.
IfFailRet(GetCode(pMap->nativeStartOffset+skipBytes, pMap->nativeStartOffset+skipBytes+ARRAY_SIZE(nativeBuffer), ARRAY_SIZE(nativeBuffer), nativeBuffer, &fetched));
}
#endif
// Get the length of the call instruction.
int offset = GetCallInstructionLength(nativeBuffer, fetched);
if (offset == -1)
return E_UNEXPECTED; // Could not decode instruction, this should never happen.
pOffsets[found] = pMap->nativeStartOffset + offset + skipBytes;
}
found++;
}
}
if (pOffsets)
*pFetched = found < bufferSize ? found : bufferSize;
else
*pFetched = found;
if (found == 0)
return E_FAIL;
if (pOffsets && found > bufferSize)
return S_FALSE;
return S_OK;
}
//-----------------------------------------------------------------------------
// Creates a CordbNativeCode (if it's not already created) and adds it to the
// hash table of CordbNativeCode instances belonging to this module.
// Used by CordbFunction::InitNativeCodeInfo.
//
// Arguments:
// Input:
// methodToken - the methodDef token of the function this native code belongs to
// methodDesc - the methodDesc for the jitted method
// startAddress - the hot code startAddress for this method
// Return value:
// found or created CordbNativeCode pointer
// Assumptions: methodToken is in the metadata for this module
// methodDesc and startAddress should be consistent for
// a jitted instance of methodToken's method
//-----------------------------------------------------------------------------
CordbNativeCode * CordbModule::LookupOrCreateNativeCode(mdMethodDef methodToken,
VMPTR_MethodDesc methodDesc,
CORDB_ADDRESS startAddress)
{
INTERNAL_SYNC_API_ENTRY(GetProcess());
_ASSERTE(startAddress != NULL);
_ASSERTE(methodDesc != VMPTR_MethodDesc::NullPtr());
CordbNativeCode * pNativeCode = NULL;
NativeCodeFunctionData codeInfo;
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// see if we already have this--if not, we'll make an instance, otherwise we'll just return the one we have.
pNativeCode = m_nativeCodeTable.GetBase((UINT_PTR) startAddress);
if (pNativeCode == NULL)
{
GetProcess()->GetDAC()->GetNativeCodeInfoForAddr(methodDesc, startAddress, &codeInfo);
// We didn't have an instance, so we'll build one and add it to the hash table
LOG((LF_CORDB,
LL_INFO10000,
"R:CT::RSCreating code w/ ver:0x%x, md:0x%x, nativeStart=0x%08x, nativeSize=0x%08x\n",
codeInfo.encVersion,
VmPtrToCookie(codeInfo.vmNativeCodeMethodDescToken),
codeInfo.m_rgCodeRegions[kHot].pAddress,
codeInfo.m_rgCodeRegions[kHot].cbSize));
// Lookup the function object that this code should be bound to
CordbFunction* pFunction = CordbModule::LookupOrCreateFunction(methodToken, codeInfo.encVersion);
_ASSERTE(pFunction != NULL);
// There are bugs with the on-demand class load performed by CordbFunction in some cases. The old stack
// tracing code avoided them by eagerly loading the parent class so I am following suit
pFunction->InitParentClassOfFunction();
// First, create a new CordbNativeCode instance--we'll need this to make the CordbJITInfo instance
pNativeCode = new (nothrow)CordbNativeCode(pFunction, &codeInfo, codeInfo.isInstantiatedGeneric != 0);
_ASSERTE(pNativeCode != NULL);
m_nativeCodeTable.AddBaseOrThrow(pNativeCode);
}
return pNativeCode;
} // CordbModule::LookupOrCreateNativeCode
// LoadNativeInfo loads from the left side any native variable info
// from the JIT.
//
void CordbNativeCode::LoadNativeInfo()
{
THROW_IF_NEUTERED(this);
INTERNAL_API_ENTRY(this->GetProcess());
// If we've either never done this before (no info), or we have, but the version number has increased, we
// should try and get a newer version of our JIT info.
if(m_nativeVarData.IsInitialized())
{
return;
}
// You can't do this if the function is implemented as part of the Runtime.
if (GetFunction()->IsNativeImpl() == CordbFunction::kNativeOnly)
{
ThrowHR(CORDBG_E_FUNCTION_NOT_IL);
}
CordbProcess *pProcess = GetProcess();
// Get everything via the DAC
if (m_fCodeAvailable)
{
RSLockHolder lockHolder(pProcess->GetProcessLock());
pProcess->GetDAC()->GetNativeCodeSequencePointsAndVarInfo(GetVMNativeCodeMethodDescToken(),
GetAddress(),
m_fCodeAvailable,
&m_nativeVarData,
&m_sequencePoints);
}
} // CordbNativeCode::LoadNativeInfo
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: module.cpp
//
//
//*****************************************************************************
#include "stdafx.h"
#include "winbase.h"
#include "metadataexports.h"
#include "winbase.h"
#include "corpriv.h"
#include "corsym.h"
#include "pedecoder.h"
#include "stgpool.h"
//---------------------------------------------------------------------------------------
// Update an existing metadata importer with a buffer
//
// Arguments:
//     pUnk - IUnknown of importer to update.
// pData - local buffer containing new metadata
// cbData - size of buffer in bytes.
// dwReOpenFlags - metadata flags to pass for reopening.
//
// Returns:
// S_OK on success. Else failure.
//
// Notes:
// This will call code:MDReOpenMetaDataWithMemoryEx from the metadata engine.
STDAPI ReOpenMetaDataWithMemoryEx(
void *pUnk,
LPCVOID pData,
ULONG cbData,
DWORD dwReOpenFlags)
{
HRESULT hr = MDReOpenMetaDataWithMemoryEx(pUnk,pData, cbData, dwReOpenFlags);
return hr;
}
//---------------------------------------------------------------------------------------
// Initialize a new CordbModule around a Module in the target.
//
// Arguments:
// pProcess - process that this module lives in
// vmDomainAssembly - CLR cookie for module.
CordbModule::CordbModule(
CordbProcess * pProcess,
VMPTR_Module vmModule,
VMPTR_DomainAssembly vmDomainAssembly)
: CordbBase(pProcess, vmDomainAssembly.IsNull() ? VmPtrToCookie(vmModule) : VmPtrToCookie(vmDomainAssembly), enumCordbModule),
m_pAssembly(0),
m_pAppDomain(0),
m_classes(11),
m_functions(101),
m_vmDomainAssembly(vmDomainAssembly),
m_vmModule(vmModule),
m_EnCCount(0),
m_fForceMetaDataSerialize(FALSE),
m_nativeCodeTable(101)
{
_ASSERTE(pProcess->GetProcessLock()->HasLock());
_ASSERTE(!vmModule.IsNull());
m_nLoadEventContinueCounter = 0;
#ifdef _DEBUG
m_classes.DebugSetRSLock(pProcess->GetProcessLock());
m_functions.DebugSetRSLock(pProcess->GetProcessLock());
#endif
// Fill out properties via DAC.
ModuleInfo modInfo;
pProcess->GetDAC()->GetModuleData(vmModule, &modInfo); // throws
m_PEBuffer.Init(modInfo.pPEBaseAddress, modInfo.nPESize);
m_fDynamic = modInfo.fIsDynamic;
m_fInMemory = modInfo.fInMemory;
m_vmPEFile = modInfo.vmPEAssembly;
if (!vmDomainAssembly.IsNull())
{
DomainAssemblyInfo dfInfo;
pProcess->GetDAC()->GetDomainAssemblyData(vmDomainAssembly, &dfInfo); // throws
m_pAppDomain = pProcess->LookupOrCreateAppDomain(dfInfo.vmAppDomain);
m_pAssembly = m_pAppDomain->LookupOrCreateAssembly(dfInfo.vmDomainAssembly);
}
else
{
// Not yet implemented
m_pAppDomain = pProcess->GetSharedAppDomain();
m_pAssembly = m_pAppDomain->LookupOrCreateAssembly(modInfo.vmAssembly);
}
#ifdef _DEBUG
m_nativeCodeTable.DebugSetRSLock(GetProcess()->GetProcessLock());
#endif
// MetaData is initialized lazily (via code:CordbModule::GetMetaDataImporter).
// Getting the metadata may be very expensive (especially if we go through the metadata locator, which
// invokes back to the data-target), so don't do it until asked.
// m_pIMImport, m_pInternalMetaDataImport are smart pointers that already initialize to NULL.
}
#ifdef _DEBUG
//---------------------------------------------------------------------------------------
// Callback helper for code:CordbModule::DbgAssertModuleDeleted
//
// Arguments
// vmDomainAssembly - domain file in the enumeration
// pUserData - pointer to the CordbModule that we just got an exit event for.
//
void DbgAssertModuleDeletedCallback(VMPTR_DomainAssembly vmDomainAssembly, void * pUserData)
{
CordbModule * pThis = reinterpret_cast<CordbModule *>(pUserData);
INTERNAL_DAC_CALLBACK(pThis->GetProcess());
if (!pThis->m_vmDomainAssembly.IsNull())
{
VMPTR_DomainAssembly vmDomainAssemblyDeleted = pThis->m_vmDomainAssembly;
CONSISTENCY_CHECK_MSGF((vmDomainAssemblyDeleted != vmDomainAssembly),
("A Module Unload event was sent for a module, but it still shows up in the enumeration.\n vmDomainAssemblyDeleted=%p\n",
VmPtrToCookie(vmDomainAssemblyDeleted)));
}
}
//---------------------------------------------------------------------------------------
// Assert that a module is no longer discoverable via enumeration.
//
// Notes:
// See code:IDacDbiInterface#Enumeration for rules that we're asserting.
// This is a debug only method. It's conceptually similar to
// code:CordbProcess::DbgAssertAppDomainDeleted.
//
void CordbModule::DbgAssertModuleDeleted()
{
GetProcess()->GetDAC()->EnumerateModulesInAssembly(
m_pAssembly->GetDomainAssemblyPtr(),
DbgAssertModuleDeletedCallback,
this);
}
#endif // _DEBUG
CordbModule::~CordbModule()
{
// We should have been explicitly neutered before our internal ref went to 0.
_ASSERTE(IsNeutered());
_ASSERTE(m_pIMImport == NULL);
}
// Neutered by CordbAppDomain
void CordbModule::Neuter()
{
// m_pAppDomain, m_pAssembly assigned w/o AddRef()
m_classes.NeuterAndClear(GetProcess()->GetProcessLock());
m_functions.NeuterAndClear(GetProcess()->GetProcessLock());
m_nativeCodeTable.NeuterAndClear(GetProcess()->GetProcessLock());
m_pClass.Clear();
// This is very important because it also releases the metadata's potential file locks.
m_pInternalMetaDataImport.Clear();
m_pIMImport.Clear();
CordbBase::Neuter();
}
//
// Creates an IStream based off the memory described by the TargetBuffer.
//
// Arguments:
// pProcess - process that buffer is valid in.
// buffer - memory range in target
// ppStream - out parameter to receive the new stream. *ppStream == NULL on input.
// caller owns the new object and must call Release.
//
// Returns:
// Throws on error.
// Common errors include if memory is missing in the target.
//
// Notes:
// This will copy the memory over from the TargetBuffer, and then create a new IStream
// object around it.
//
void GetStreamFromTargetBuffer(CordbProcess * pProcess, TargetBuffer buffer, IStream ** ppStream)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
_ASSERTE(ppStream != NULL);
_ASSERTE(*ppStream == NULL);
int cbSize = buffer.cbSize;
NewArrayHolder<BYTE> localBuffer(new BYTE[cbSize]);
pProcess->SafeReadBuffer(buffer, localBuffer);
HRESULT hr = E_FAIL;
hr = CInMemoryStream::CreateStreamOnMemoryCopy(localBuffer, cbSize, ppStream);
IfFailThrow(hr);
_ASSERTE(*ppStream != NULL);
}
//
// Helper API to get in-memory symbols from the target into a host stream object.
//
// Arguments:
// ppStream - out parameter to receive the new stream. *ppStream == NULL on input.
// caller owns the new object and must call Release.
//
// Returns:
// kSymbolFormatNone if no PDB stream is present. This is a common case for
// file-based modules, and also for dynamic modules that just aren't tracking
// debug information.
// The format of the symbols stored into ppStream. This is common:
// - Ref.Emit modules if the debuggee generated debug symbols,
// - in-memory modules (such as Load(Byte[], Byte[])
// - hosted modules.
// Throws on error
//
IDacDbiInterface::SymbolFormat CordbModule::GetInMemorySymbolStream(IStream ** ppStream)
{
// @dbgtodo : add a PUBLIC_REENTRANT_API_ENTRY_FOR_SHIM contract
// This function is mainly called internally in dbi, and also by the shim to emulate the
// UpdateModuleSymbols callback on attach.
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
_ASSERTE(ppStream != NULL);
_ASSERTE(*ppStream == NULL);
*ppStream = NULL;
TargetBuffer bufferPdb;
IDacDbiInterface::SymbolFormat symFormat;
GetProcess()->GetDAC()->GetSymbolsBuffer(m_vmModule, &bufferPdb, &symFormat);
if (bufferPdb.IsEmpty())
{
// No in-memory PDB. Common case.
_ASSERTE(symFormat == IDacDbiInterface::kSymbolFormatNone);
return IDacDbiInterface::kSymbolFormatNone;
}
else
{
_ASSERTE(symFormat != IDacDbiInterface::kSymbolFormatNone);
GetStreamFromTargetBuffer(GetProcess(), bufferPdb, ppStream);
return symFormat;
}
}
//---------------------------------------------------------------------------------------
// Accessor for PE file.
//
// Returns:
// VMPTR_PEAssembly for this module. Should always be non-null
//
// Notes:
// A main usage of this is to find the proper internal MetaData importer.
// DACized code needs to map from PEAssembly --> IMDInternalImport.
//
VMPTR_PEAssembly CordbModule::GetPEFile()
{
return m_vmPEFile;
}
//---------------------------------------------------------------------------------------
//
// Top-level getter for the public metadata importer for this module
//
// Returns:
// metadata importer.
// Never returns NULL. Will throw some hr (likely CORDBG_E_MISSING_METADATA) instead.
//
// Notes:
// This will lazily create the metadata, possibly invoking back into the data-target.
IMetaDataImport * CordbModule::GetMetaDataImporter()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
// If we already have it, then we're done.
// This is critical to do at the top of this function to avoid potential recursion.
if (m_pIMImport != NULL)
{
return m_pIMImport;
}
// Lazily initialize
// Fetch metadata from target
LOG((LF_CORDB,LL_INFO1000, "CM::GMI Lazy init refreshing metadata\n"));
ALLOW_DATATARGET_MISSING_MEMORY(
RefreshMetaData();
);
// If lookup failed from the Module & target memory, try the metadata locator interface
// from debugger, if we have one.
if (m_pIMImport == NULL)
{
bool isILMetaDataForNGENImage; // Not currently used for anything.
// The process's LookupMetaData will ping the debugger's ICorDebugMetaDataLocator iface.
CordbProcess * pProcess = GetProcess();
RSLockHolder processLockHolder(pProcess->GetProcessLock());
m_pInternalMetaDataImport.Clear();
// Do not call code:CordbProcess::LookupMetaData from this function. It will try to load
// through the CordbModule again which will end up back here, and on failure you'll fill the stack.
// Since we've already done everything possible from the Module anyhow, just call the
// stuff that talks to the debugger.
// Don't do anything with the ptr returned here, since it's really m_pInternalMetaDataImport.
pProcess->LookupMetaDataFromDebugger(m_vmPEFile, isILMetaDataForNGENImage, this);
}
// If we still can't get it, throw.
if (m_pIMImport == NULL)
{
ThrowHR(CORDBG_E_MISSING_METADATA);
}
return m_pIMImport;
}
// Refresh the metadata cache if a profiler added new rows.
//
// Arguments:
// token - token that we want to ensure is in the metadata cache.
//
// Notes:
//    In the profiler case, the token may refer to newly added rows, so we may need to update the metadata.
// This only supports StandAloneSigs.
//
void CordbModule::UpdateMetaDataCacheIfNeeded(mdToken token)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN token=0x%x\n", token));
// If we aren't trying to keep parity with our legacy profiler metadata update behavior
// then we should avoid this temporary update mechanism entirely
if(GetProcess()->GetWriteableMetadataUpdateMode() != LegacyCompatPolicy)
{
return;
}
//
// 1) Check if in-range? Compare against tables, etc.
//
if(CheckIfTokenInMetaData(token))
{
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN token was present\n"));
return;
}
//
// 2) Copy over new MetaData. From now on we assume that the profiler is
// modifying module metadata and that we need to serialize in process
// at each refresh
//
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN token was not present, refreshing\n"));
m_fForceMetaDataSerialize = TRUE;
RefreshMetaData();
// If we are dump debugging, we may still not have it. Nothing to be done.
}
// Returns TRUE if the token is present, FALSE if not.
BOOL CordbModule::CheckIfTokenInMetaData(mdToken token)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO10000, "CM::CITIM token=0x%x\n", token));
_ASSERTE(TypeFromToken(token) == mdtSignature);
RSExtSmartPtr<IMetaDataTables> pTable;
HRESULT hr = GetMetaDataImporter()->QueryInterface(IID_IMetaDataTables, (void**) &pTable);
_ASSERTE(SUCCEEDED(hr));
if (FAILED(hr))
{
ThrowHR(hr);
}
ULONG cbRowsAvailable; // number of rows in the table
hr = pTable->GetTableInfo(
mdtSignature >> 24, // [IN] Which table.
NULL, // [OUT] Size of a row, bytes.
&cbRowsAvailable, // [OUT] Number of rows.
NULL, // [OUT] Number of columns in each row.
NULL, // [OUT] Key column, or -1 if none.
NULL); // [OUT] Name of the table.
_ASSERTE(SUCCEEDED(hr));
if (FAILED(hr))
{
ThrowHR(hr);
}
// Rows start counting with number 1.
ULONG rowRequested = RidFromToken(token);
LOG((LF_CORDB,LL_INFO10000, "CM::UMCIN requested=0x%x available=0x%x\n", rowRequested, cbRowsAvailable));
return (rowRequested <= cbRowsAvailable);
}
// This helper class ensures the remote serialized buffer gets deleted in the RefreshMetaData
// function below.
class CleanupRemoteBuffer
{
public:
CordbProcess* pProcess;
CordbModule* pModule;
TargetBuffer bufferMetaData;
BOOL fDoCleanup;
CleanupRemoteBuffer() :
fDoCleanup(FALSE) { }
~CleanupRemoteBuffer()
{
if(fDoCleanup)
{
//
// Send 2nd event to free buffer.
//
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event,
DB_IPCE_RESOLVE_UPDATE_METADATA_2,
true,
pModule->GetAppDomain()->GetADToken());
event.MetadataUpdateRequest.pMetadataStart = CORDB_ADDRESS_TO_PTR(bufferMetaData.pAddress);
// Note: two-way event here...
IfFailThrow(pProcess->SendIPCEvent(&event, sizeof(DebuggerIPCEvent)));
_ASSERTE(event.type == DB_IPCE_RESOLVE_UPDATE_METADATA_2_RESULT);
}
}
};
// Called to refetch metadata. This occurs when a dynamic module grows or the profiler
// has edited the metadata
void CordbModule::RefreshMetaData()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO1000, "CM::RM\n"));
// There are several different ways we can get the metadata
// 1) [Most common] Module is loaded into VM and never changed. The importer
    //    will be constructed referring to the file on disk. This is a significant
// working set win because the VM and debugger share the image. If there is
// an error reading by file we can fall back to case #2 for these modules
// 2) Most modules have a buffer in target memory that represents their
// metadata. We copy that data over the RS and construct an in-memory
// importer on top of it.
// 3) The only modules that don't have a suitable buffer (case #2) are those
// modified in memory via the profiling API (or ENC). A message can be sent from
// the debugger to the debuggee instructing it to allocate a buffer and
// serialize the metadata into it. Then we copy that data to the RS and
// construct an in-memory importer on top of it.
// We don't need to send this message in the ENC case because the debugger
// has the same changes applied as the debuggee.
// 4) Case #3 won't work when dump debugging because we can't send IPC events.
// Instead we can locate chunks of the metadata pointed to in the implementation
// details of a remote MDInternalRW object, marshal that memory over to the
// debugger process, and then put a metadata reader on top of it.
// In time this DAC'ized metadata could be used in almost any scenario,
    //    although it's probably worth keeping the file mapping technique in case
// #1 around for its performance wins.
CordbProcess * pProcess = GetProcess();
TargetBuffer bufferMetaData;
CleanupRemoteBuffer cleanup; // this local has a destructor to do some finally work
// check for scenarios we might want to handle with case #4
if (GetProcess()->GetShim() == NULL &&
GetProcess()->GetWriteableMetadataUpdateMode() == AlwaysShowUpdates &&
!m_fDynamic)
{
        // None of the above requirements are particularly hard to change in the future as needed...
// a) dump-debugging mode - If we do this on a process that can move forward we need a mechanism to determine
// when to refetch the metadata.
// b) AlwaysShowUpdates - this is purely a risk mitigation choice, there aren't any known back-compat issues
// using DAC'ized metadata. If you want back-compat with the in-proc debugging behavior
// you need to figure out how to ReOpen the same public MD interface with new data.
// c) !m_fDynamic - A risk mitigation choice. Initial testing suggests it would work fine.
// So far we've only got a reader for in-memory-writable metadata (MDInternalRW implementation)
        // We could make a reader for MDInternalRO, but no need yet. This also ensures we don't encroach into the common
// scenario where we can map a file on disk.
TADDR remoteMDInternalRWAddr = NULL;
GetProcess()->GetDAC()->GetPEFileMDInternalRW(m_vmPEFile, &remoteMDInternalRWAddr);
if (remoteMDInternalRWAddr != NULL)
{
// we should only be doing this once to initialize, we don't support reopen with this technique
_ASSERTE(m_pIMImport == NULL);
ULONG32 mdStructuresVersion;
HRESULT hr = GetProcess()->GetDAC()->GetMDStructuresVersion(&mdStructuresVersion);
IfFailThrow(hr);
ULONG32 mdStructuresDefines;
hr = GetProcess()->GetDAC()->GetDefinesBitField(&mdStructuresDefines);
IfFailThrow(hr);
IMetaDataDispenserCustom* pDispCustom = NULL;
hr = GetProcess()->GetDispenser()->QueryInterface(IID_IMetaDataDispenserCustom, (void**)&pDispCustom);
IfFailThrow(hr);
IMDCustomDataSource* pDataSource = NULL;
hr = CreateRemoteMDInternalRWSource(remoteMDInternalRWAddr, GetProcess()->GetDataTarget(), mdStructuresDefines, mdStructuresVersion, &pDataSource);
IfFailThrow(hr);
IMetaDataImport* pImport = NULL;
hr = pDispCustom->OpenScopeOnCustomDataSource(pDataSource, 0, IID_IMetaDataImport, (IUnknown**)&m_pIMImport);
IfFailThrow(hr);
UpdateInternalMetaData();
return;
}
}
if(!m_fForceMetaDataSerialize) // case 1 and 2
{
LOG((LF_CORDB,LL_INFO10000, "CM::RM !m_fForceMetaDataSerialize case\n"));
GetProcess()->GetDAC()->GetMetadata(m_vmModule, &bufferMetaData); // throws
}
else if (GetProcess()->GetShim() == NULL) // case 3 won't work on a dump so don't try
{
return;
}
else // case 3 on a live process
{
LOG((LF_CORDB,LL_INFO10000, "CM::RM m_fForceMetaDataSerialize case\n"));
//
// Send 1 event to get metadata. This allocates a buffer
//
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event,
DB_IPCE_RESOLVE_UPDATE_METADATA_1,
true,
GetAppDomain()->GetADToken());
event.MetadataUpdateRequest.vmModule = m_vmModule;
// Note: two-way event here...
IfFailThrow(pProcess->SendIPCEvent(&event, sizeof(DebuggerIPCEvent)));
_ASSERTE(event.type == DB_IPCE_RESOLVE_UPDATE_METADATA_1_RESULT);
//
// Update it on the RS
//
bufferMetaData.Init(PTR_TO_CORDB_ADDRESS(event.MetadataUpdateRequest.pMetadataStart), (ULONG) event.MetadataUpdateRequest.nMetadataSize);
// init the cleanup object to ensure the buffer gets destroyed later
cleanup.bufferMetaData = bufferMetaData;
cleanup.pProcess = pProcess;
cleanup.pModule = this;
cleanup.fDoCleanup = TRUE;
}
InitMetaData(bufferMetaData, IsFileMetaDataValid()); // throws
}
// Determines whether the on-disk metadata for this module is usable as the
// current metadata
BOOL CordbModule::IsFileMetaDataValid()
{
bool fOpenFromFile = true;
// Dynamic, In-memory, modules must be OpenScopeOnMemory.
// For modules that require the metadata to be serialized in memory, we must also OpenScopeOnMemory
    // For EnC, we can use OpenScope(onFile) and it will get converted to Memory when we get an emitter.
// We're called from before the ModuleLoad callback, so EnC status hasn't been set yet, so
// EnC will be false.
if (m_fDynamic || m_fInMemory || m_fForceMetaDataSerialize)
{
LOG((LF_CORDB,LL_INFO10000, "CM::IFMV: m_fDynamic=0x%x m_fInMemory=0x%x m_fForceMetaDataSerialize=0x%x\n",
m_fDynamic, m_fInMemory, m_fForceMetaDataSerialize));
fOpenFromFile = false;
}
#ifdef _DEBUG
// Reg key override to force us to use Open-by-memory. This can let us run perf tests to
// compare the Open-by-mem vs. Open-by-file.
static DWORD openFromFile = 99;
if (openFromFile == 99)
openFromFile = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgNoOpenMDByFile);
if (openFromFile)
{
LOG((LF_CORDB,LL_INFO10000, "CM::IFMV: INTERNAL_DbgNoOpenMDByFile is set\n"));
fOpenFromFile = false;
}
#endif
LOG((LF_CORDB,LL_INFO10000, "CM::IFMV: returns 0x%x\n", fOpenFromFile));
return fOpenFromFile;
}
//---------------------------------------------------------------------------------------
// Accessor for Internal MetaData importer. This is lazily initialized.
//
// Returns:
// Internal MetaDataImporter, which can be handed off to DAC. Not AddRef().
// Should be non-null. Throws on error.
//
// Notes:
// An internal metadata importer is used extensively by DAC-ized code (And Edit-and-continue).
// This should not be handed out through ICorDebug.
IMDInternalImport * CordbModule::GetInternalMD()
{
if (m_pInternalMetaDataImport == NULL)
{
UpdateInternalMetaData(); // throws
}
return m_pInternalMetaDataImport;
}
//---------------------------------------------------------------------------------------
// The one-stop top-level initialization function the metadata (both public and private) for this module.
//
// Arguments:
// buffer - valid buffer into target containing the metadata.
//    allowFileMappingOptimization - if true this allows us to attempt just opening the importer
//                                   by using the metadata in the module on disk. if false or
//                                   if the attempt fails we open the metadata import on memory in
//                                   the target buffer
//
// Notes:
// This will initialize both the internal and public metadata from the buffer in the target.
// Only called as a helper from RefreshMetaData()
//
// This may throw (eg, target buffer is missing).
//
void CordbModule::InitMetaData(TargetBuffer buffer, BOOL allowFileMappingOptimization)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
LOG((LF_CORDB,LL_INFO100000, "CM::IM: initing with remote buffer 0x%p length 0x%x\n",
CORDB_ADDRESS_TO_PTR(buffer.pAddress), buffer.cbSize));
// clear all the metadata
m_pInternalMetaDataImport.Clear();
if (m_pIMImport == NULL)
{
// The optimization we're going for here is that the OS will use the same physical memory to
// back multiple ReadOnly opens of the same file. Thus since we expect the target process in
// live debugging, or the debugger in dump debugging, has already opened the file we would
// like to not create a local buffer and spend time copying in metadata from the target when
// the OS will happily do address lookup magic against the same physical memory for everyone.
// Try getting the data from the file if allowed, and fall back to using the buffer
// if required
HRESULT hr = S_OK;
if (allowFileMappingOptimization)
{
hr = InitPublicMetaDataFromFile();
if(FAILED(hr))
{
LOG((LF_CORDB,LL_INFO1000000, "CM::IPM: File mapping failed with hr=0x%x\n", hr));
}
}
if(!allowFileMappingOptimization || FAILED(hr))
{
// This is where the expensive copy of all metadata content from target memory
// that we would like to try and avoid happens.
InitPublicMetaData(buffer);
}
}
else
{
// We've already handed out an Import object, and so we can't create a new pointer instance.
// Instead, we update the existing instance with new data.
UpdatePublicMetaDataFromRemote(buffer);
}
// if we haven't set it by this point UpdateInternalMetaData below is going to get us
// in an infinite loop of refreshing public metadata
_ASSERTE(m_pIMImport != NULL);
// Now that public metadata has changed, force internal metadata to update too.
// Public and internal metadata expose different access interfaces to the same underlying storage.
UpdateInternalMetaData();
}
//---------------------------------------------------------------------------------------
// Updates the Internal MetaData object from the public importer. Lazily fetch public importer if needed.
//
// Assumptions:
// Caller has cleared Internal metadata before even updating public metadata.
// This way, if the caller fails halfway through updating the public metadata, we don't have
// stale internal MetaData.
void CordbModule::UpdateInternalMetaData()
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
// Caller should have already cleared it.
_ASSERTE(m_pInternalMetaDataImport == NULL);
// Get the importer. If it's currently null, this will go fetch it.
IMetaDataImport * pImport = GetMetaDataImporter(); // throws
// If both the public and the private interfaces are NULL on entry to this function, the call above will
// recursively call this function. This can happen if the caller calls GetInternalMD() directly
// instead of InitMetaData(). In this case, the above function call will have initialized the internal
// interface as well, so we need to check for it here.
if (m_pInternalMetaDataImport == NULL)
{
HRESULT hr = GetMDInternalInterfaceFromPublic(
pImport,
IID_IMDInternalImport,
reinterpret_cast<void**> (&m_pInternalMetaDataImport));
if (m_pInternalMetaDataImport == NULL)
{
ThrowHR(hr);
}
}
_ASSERTE(m_pInternalMetaDataImport != NULL);
}
// Initialize the public metadata.
//
// The debuggee already has a copy of the metadata in its process.
// If we OpenScope on file as read-only, the OS file-system will share our metadata with the
// copy in the debuggee. This can be a major perf win. FX metadata can be over 8 MB+.
// OpenScopeOnMemory can't be shared b/c we allocate a buffer.
HRESULT CordbModule::InitPublicMetaDataFromFile()
{
INTERNAL_API_ENTRY(this->GetProcess());
// @dbgtodo metadata - In v3, we can't assume we have the same path namespace as the target (i.e. it could be
// a dump or remote), so we can't just try and open the file. Instead we have to rely on interfaces
// on the datatarget to map the metadata here. Note that this must also work for minidumps where the
// metadata isn't necessarily in the dump image.
// Get filename. There are 2 filenames to choose from:
// - ngen (if applicable).
// - non-ngen (aka "normal").
// By loading metadata out of the same OS file as loaded into the debuggee space, the OS can share those pages.
const WCHAR * szFullPathName = NULL;
bool fDebuggerLoadingNgen = false;
bool fDebuggeeLoadedNgen = false;
szFullPathName = GetNGenImagePath();
if(szFullPathName != NULL)
{
fDebuggeeLoadedNgen = true;
fDebuggerLoadingNgen = true;
#ifndef TARGET_UNIX
        // NGEN images are large and we shouldn't load them if they won't be shared, therefore fail the NGEN mapping and
        // fall back to the IL image if the debugger doesn't have the image loaded already.
        // It's possible that the debugger would still load the NGEN image sometime in the future and we will miss a sharing
        // opportunity. It's an acceptable loss from an imperfect heuristic.
if (NULL == WszGetModuleHandle(szFullPathName))
#endif
{
szFullPathName = NULL;
fDebuggerLoadingNgen = false;
}
}
// If we don't have or decided not to load the NGEN image, check to see if IL image is available
if (!fDebuggerLoadingNgen)
{
szFullPathName = GetModulePath();
}
// If we are doing live debugging we shouldn't use metadata from an IL image because it doesn't match closely enough.
// In particular the RVAs for IL code headers are different between the two images which will cause all IL code and
// local var signature lookups to fail. With further work we could compensate for the RVAs by computing
// the image layout differences and adjusting the returned RVAs, but there may be other differences that need to be accounted
// for as well. If we did go that route we should do a binary diff across a variety of NGEN/IL image metadata blobs to
// get a concrete understanding of the format differences.
//
// This check should really be 'Are we OK with only getting the functionality level of mini-dump debugging?' but since we
// don't know the debugger's intent we guess whether or not we are doing dump debugging by checking if we are shimmed. Once
// the shim supports live debugging we should probably just stop automatically falling back to IL image and let the debugger
// decide via the ICorDebugMetadataLocator interface.
if(fDebuggeeLoadedNgen && !fDebuggerLoadingNgen && GetProcess()->GetShim()!=NULL)
{
// The IL image might be there, but we shouldn't use it for live debugging
return CORDBG_E_MISSING_METADATA;
}
// @dbgtodo metadata - This is really a CreateFile() call which we can't do. We must offload this to
// the data target for the dump-debugging scenarios.
//
// We're opening it as "read". If we QI for an IEmit interface (which we need for EnC),
// then the metadata engine will convert it to a "write" underneath us.
// We want "read" so that we can let the OS share the pages.
DWORD dwOpenFlags = 0;
// If we know we're never going to need to write (i.e. never do EnC), then we should indicate
// that to metadata by telling it this interface will always be read-only. By passing read-only,
// the metadata library will then also share the VM space for the image when the same image is
// opened multiple times for multiple AppDomains.
// We don't currently have a way to tell absolutely whether this module will support EnC, but we
// know that NGen modules NEVER support EnC, and NGen is the common case that eats up a lot of VM.
// So we'll use the heuristic of opening the metadata for all ngen images as read-only. Ideally
// we'd go even further here (perhaps even changing metadata to map only the region of the file it
// needs).
if (fDebuggerLoadingNgen)
{
dwOpenFlags = ofReadOnly | ofTrustedImage;
}
// This is the only place we ever validate that the file matches, because we're potentially
// loading the file from disk ourselves. We're doing this without giving the debugger a chance
// to do anything. We should never load a file that isn't an exact match.
return InitPublicMetaDataFromFile(szFullPathName, dwOpenFlags, true);
}
// We should only ever validate we have the correct file if it's a file we found ourselves.
// We allow the debugger to choose their own policy with regard to using metadata from the IL image
// when debugging an NI, or even intentionally using mismatched metadata if they like.
HRESULT CordbModule::InitPublicMetaDataFromFile(const WCHAR * pszFullPathName,
DWORD dwOpenFlags,
bool validateFileInfo)
{
#ifdef HOST_UNIX
    // UNIXTODO: Some intricate details of file mapping don't work the same on Linux as on Windows.
    // We have to revisit this and try to fix it for POSIX systems.
return E_FAIL;
#else
if (validateFileInfo)
{
// Check that we've got the right file to target.
// There's nothing to prevent some other file being copied in for live, and with
// dump debugging there's nothing to say that we're not on another machine where a different
// file is at the same path.
// If we can't validate we have a hold of the correct file, we should not open it.
// We will fall back on asking the debugger to get us the correct file, or copying
// target memory back to the debugger.
DWORD dwImageTimeStamp = 0;
DWORD dwImageSize = 0;
bool isNGEN = false; // unused
StringCopyHolder filePath;
_ASSERTE(!m_vmPEFile.IsNull());
// MetaData lookup favors the NGEN image, which is what we want here.
if (!this->GetProcess()->GetDAC()->GetMetaDataFileInfoFromPEFile(m_vmPEFile,
dwImageTimeStamp,
dwImageSize,
isNGEN,
&filePath))
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't get metadata info for file \"%s\"\n", pszFullPathName));
return CORDBG_E_MISSING_METADATA;
}
// If the timestamp and size don't match, then this is the wrong file!
// Map the file and check them.
HandleHolder hMDFile = WszCreateFile(pszFullPathName,
GENERIC_READ,
FILE_SHARE_READ,
NULL, // default security descriptor
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (hMDFile == INVALID_HANDLE_VALUE)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't open file \"%s\" (GLE=%x)\n", pszFullPathName, GetLastError()));
return CORDBG_E_MISSING_METADATA;
}
DWORD dwFileHigh = 0;
DWORD dwFileLow = GetFileSize(hMDFile, &dwFileHigh);
if (dwFileLow == INVALID_FILE_SIZE)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: File \"%s\" had invalid size.\n", pszFullPathName));
return CORDBG_E_MISSING_METADATA;
}
_ASSERTE(dwFileHigh == 0);
HandleHolder hMap = WszCreateFileMapping(hMDFile, NULL, PAGE_READONLY, dwFileHigh, dwFileLow, NULL);
if (hMap == NULL)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't create mapping of file \"%s\" (GLE=%x)\n", pszFullPathName, GetLastError()));
return CORDBG_E_MISSING_METADATA;
}
MapViewHolder hMapView = MapViewOfFile(hMap, FILE_MAP_READ, 0, 0, 0);
if (hMapView == NULL)
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't map view of file \"%s\" (GLE=%x)\n", pszFullPathName, GetLastError()));
return CORDBG_E_MISSING_METADATA;
}
// Mapped as flat file, have PEDecoder go find what we want.
PEDecoder pedecoder(hMapView, (COUNT_T)dwFileLow);
if (!pedecoder.HasNTHeaders())
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: \"%s\" did not have PE headers!\n", pszFullPathName));
return CORDBG_E_MISSING_METADATA;
}
if ((dwImageSize != pedecoder.GetVirtualSize()) ||
(dwImageTimeStamp != pedecoder.GetTimeDateStamp()))
{
LOG((LF_CORDB,LL_WARNING, "CM::IM: Validation of \"%s\" failed. "
"Expected size=%x, Expected timestamp=%x, Actual size=%x, Actual timestamp=%x\n",
                pszFullPathName,
                dwImageSize,
                dwImageTimeStamp,
                pedecoder.GetVirtualSize(),
                pedecoder.GetTimeDateStamp()));
return CORDBG_E_MISSING_METADATA;
}
// All checks passed, go ahead and load this file for real.
}
// Get metadata Dispenser.
IMetaDataDispenserEx * pDisp = GetProcess()->GetDispenser();
HRESULT hr = pDisp->OpenScope(pszFullPathName, dwOpenFlags, IID_IMetaDataImport, (IUnknown**)&m_pIMImport);
_ASSERTE(SUCCEEDED(hr) == (m_pIMImport != NULL));
if (FAILED(hr))
{
// This should never happen in normal scenarios. It could happen if someone has renamed
// the assembly after it was opened by the debugee process, but this should be rare enough
// that we don't mind taking the perf. hit and loading from memory.
// @dbgtodo metadata - would this happen in the shadow-copy scenario?
LOG((LF_CORDB,LL_WARNING, "CM::IM: Couldn't open metadata in file \"%s\" (hr=%x)\n", pszFullPathName, hr));
}
return hr;
#endif // HOST_UNIX
}
//---------------------------------------------------------------------------------------
// Initialize the public metadata.
//
// Arguments:
// buffer - valid buffer into target containing the metadata.
//
// Assumptions:
// This is an internal function which should only be called once to initialize the
// metadata. Future attempts to re-initialize (in dynamic cases) should call code:CordbModule::UpdatePublicMetaDataFromRemote
// After the public metadata is initialized, initialize private metadata via code:CordbModule::UpdateInternalMetaData
//
void CordbModule::InitPublicMetaData(TargetBuffer buffer)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
INTERNAL_API_ENTRY(this->GetProcess());
LOG((LF_CORDB,LL_INFO100000, "CM::IPM: initing with remote buffer 0x%p length 0x%x\n",
CORDB_ADDRESS_TO_PTR(buffer.pAddress), buffer.cbSize));
ULONG nMetaDataSize = buffer.cbSize;
if (nMetaDataSize == 0)
{
// We should always have metadata, and if we don't, we want to know.
// @dbgtodo metadata - we know metadata from dynamic modules doesn't work in V3
// (non-shim) cases yet.
// But our caller should already have handled that case.
SIMPLIFYING_ASSUMPTION(!"Error: missing the metadata");
return;
}
HRESULT hr = S_OK;
// Get metadata Dispenser.
IMetaDataDispenserEx * pDisp = GetProcess()->GetDispenser();
// copy it over from the remote process
CoTaskMemHolder<VOID> pMetaDataCopy;
CopyRemoteMetaData(buffer, pMetaDataCopy.GetAddr());
//
// Setup our metadata import object, m_pIMImport
//
// Save the old mode for restoration
VARIANT valueOld;
hr = pDisp->GetOption(MetaDataSetUpdate, &valueOld);
SIMPLIFYING_ASSUMPTION(!FAILED(hr));
// Set R/W mode so that we can update the metadata when
// we do EnC operations.
VARIANT valueRW;
V_VT(&valueRW) = VT_UI4;
V_I4(&valueRW) = MDUpdateFull;
hr = pDisp->SetOption(MetaDataSetUpdate, &valueRW);
SIMPLIFYING_ASSUMPTION(!FAILED(hr));
hr = pDisp->OpenScopeOnMemory(pMetaDataCopy,
nMetaDataSize,
ofTakeOwnership,
IID_IMetaDataImport,
reinterpret_cast<IUnknown**>( &m_pIMImport ));
// MetaData has taken ownership -don't free the memory
pMetaDataCopy.SuppressRelease();
// Immediately restore the old setting.
HRESULT hrRestore = pDisp->SetOption(MetaDataSetUpdate, &valueOld);
SIMPLIFYING_ASSUMPTION(!FAILED(hrRestore));
// Throw on errors.
IfFailThrow(hr);
IfFailThrow(hrRestore);
// Done!
}
//---------------------------------------------------------------------------------------
// Update public MetaData by copying it from the target and updating our IMetaDataImport object.
//
// Arguments:
// buffer - buffer into target space containing metadata blob
//
// Notes:
// Useful for additional class-loads into a dynamic module. A new class means new metadata
// and so we need to update the RS metadata to stay in sync with the left-side.
//
// This will call code:CordbModule::CopyRemoteMetaData to copy the remote buffer locally, and then
// it can OpenScopeOnMemory().
//
void CordbModule::UpdatePublicMetaDataFromRemote(TargetBuffer bufferRemoteMetaData)
{
CONTRACTL
{
// @dbgtodo metadata - think about the error semantics here. These fails during dispatching an event; so
// address this during event pipeline.
THROWS;
}
CONTRACTL_END;
if (bufferRemoteMetaData.IsEmpty())
{
ThrowHR(E_INVALIDARG);
}
INTERNAL_API_ENTRY(this->GetProcess()); //
LOG((LF_CORDB,LL_INFO100000, "CM::UPMFR: updating with remote buffer 0x%p length 0x%x\n",
CORDB_ADDRESS_TO_PTR(bufferRemoteMetaData.pAddress), bufferRemoteMetaData.cbSize));
// We're re-initializing existing metadata.
_ASSERTE(m_pIMImport != NULL);
HRESULT hr = S_OK;
ULONG dwMetaDataSize = bufferRemoteMetaData.cbSize;
// First copy it from the remote process
CoTaskMemHolder<VOID> pLocalMetaDataPtr;
CopyRemoteMetaData(bufferRemoteMetaData, pLocalMetaDataPtr.GetAddr());
IMetaDataDispenserEx * pDisp = GetProcess()->GetDispenser();
_ASSERTE(pDisp != NULL); // throws on error.
LOG((LF_CORDB,LL_INFO100000, "CM::RI: converting to new metadata\n"));
// now verify that the metadata is valid by opening a temporary scope on the memory
{
ReleaseHolder<IMetaDataImport> pIMImport;
hr = pDisp->OpenScopeOnMemory(pLocalMetaDataPtr,
dwMetaDataSize,
0,
IID_IMetaDataImport,
(IUnknown**)&pIMImport);
IfFailThrow(hr);
}
// We reopen on an existing instance, not create a new instance.
_ASSERTE(m_pIMImport != NULL); //
// Now tell our current IMetaDataImport object to re-initialize by swapping in the new memory block.
// This allows us to keep manipulating metadata objects on other threads without crashing.
// This will also invalidate an existing associated Internal MetaData.
hr = ReOpenMetaDataWithMemoryEx(m_pIMImport, pLocalMetaDataPtr, dwMetaDataSize, ofTakeOwnership );
IfFailThrow(hr);
// Success. MetaData now owns the metadata memory
pLocalMetaDataPtr.SuppressRelease();
}
//---------------------------------------------------------------------------------------
// Copy metadata memory from the remote process into a newly allocated local buffer.
//
// Arguments:
//    buffer - target buffer describing the remote metadata blob (address and size)
//    pLocalBuffer - holder to get the local buffer.
//
// Returns:
// pLocalBuffer may be allocated.
// Throws on error (pLocalBuffer may contain garbage).
// Else if successful, pLocalBuffer contains local copy of metadata.
//
// Notes:
// This can copy metadata out for the dynamic case or the normal case.
// Uses an allocator (CoTaskMemHolder) that lets us hand off the memory to the metadata.
void CordbModule::CopyRemoteMetaData(
TargetBuffer buffer,
CoTaskMemHolder<VOID> * pLocalBuffer)
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
_ASSERTE(pLocalBuffer != NULL);
_ASSERTE(!buffer.IsEmpty());
// Allocate space for the local copy of the metadata
// No need to zero out the memory since we'll fill it all here.
LPVOID pRawBuffer = CoTaskMemAlloc(buffer.cbSize);
if (pRawBuffer == NULL)
{
ThrowOutOfMemory();
}
pLocalBuffer->Assign(pRawBuffer);
// Copy the metadata from the left side
GetProcess()->SafeReadBuffer(buffer, (BYTE *)pRawBuffer);
return;
}
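// Standard IUnknown::QueryInterface implementation.
// Hands out the ICorDebugModule, ICorDebugModule2/3/4, and IUnknown interfaces implemented by this object.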
HRESULT CordbModule::QueryInterface(REFIID id, void **pInterface)
{
if (id == IID_ICorDebugModule)
{
*pInterface = static_cast<ICorDebugModule*>(this);
}
else if (id == IID_ICorDebugModule2)
{
*pInterface = static_cast<ICorDebugModule2*>(this);
}
else if (id == IID_ICorDebugModule3)
{
*pInterface = static_cast<ICorDebugModule3*>(this);
}
else if (id == IID_ICorDebugModule4)
{
*pInterface = static_cast<ICorDebugModule4*>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown*>(static_cast<ICorDebugModule*>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
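// Public implementation of ICorDebugModule::GetProcess.
// Returns an addref-ed pointer to the process this module is loaded into.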
HRESULT CordbModule::GetProcess(ICorDebugProcess **ppProcess)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppProcess, ICorDebugProcess **);
*ppProcess = static_cast<ICorDebugProcess*> (GetProcess());
GetProcess()->ExternalAddRef();
return S_OK;
}
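// Public implementation of ICorDebugModule::GetBaseAddress.
// Returns the base load address of the module image in the debuggee's address space.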
HRESULT CordbModule::GetBaseAddress(CORDB_ADDRESS *pAddress)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pAddress, CORDB_ADDRESS *);
*pAddress = m_PEBuffer.pAddress;
return S_OK;
}
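// Public implementation of ICorDebugModule::GetAssembly.
// Returns an addref-ed pointer to the assembly that contains this module.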
HRESULT CordbModule::GetAssembly(ICorDebugAssembly **ppAssembly)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppAssembly, ICorDebugAssembly **);
*ppAssembly = static_cast<ICorDebugAssembly *> (m_pAssembly);
if (m_pAssembly != NULL)
{
m_pAssembly->ExternalAddRef();
}
return S_OK;
}
// Public implementation of ICorDebugModule::GetName,
// wrapper around code:GetNameWorker (which throws).
HRESULT CordbModule::GetName(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
EX_TRY
{
hr = GetNameWorker(cchName, pcchName, szName);
}
EX_CATCH_HRESULT(hr);
// GetNameWorker can use metadata. If it fails due to missing metadata, or if we fail to find expected
// target memory (dump debugging) then we should fall back to getting the file name without metadata.
if ((hr == CORDBG_E_MISSING_METADATA) ||
(hr == CORDBG_E_READVIRTUAL_FAILURE) ||
(hr == HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY)))
{
DWORD dwImageTimeStamp = 0; // unused
DWORD dwImageSize = 0; // unused
bool isNGEN = false;
StringCopyHolder filePath;
_ASSERTE(!m_vmPEFile.IsNull());
if (this->GetProcess()->GetDAC()->GetMetaDataFileInfoFromPEFile(m_vmPEFile,
dwImageTimeStamp,
dwImageSize,
isNGEN,
&filePath))
{
_ASSERTE(filePath.IsSet());
// Unfortunately, metadata lookup preferentially takes the ngen image - so in this case,
// we need to go back and get the IL image's name instead.
if ((isNGEN) &&
(this->GetProcess()->GetDAC()->GetILImageInfoFromNgenPEFile(m_vmPEFile,
dwImageTimeStamp,
dwImageSize,
&filePath)))
{
_ASSERTE(filePath.IsSet());
}
hr = CopyOutString(filePath, cchName, pcchName, szName);
}
}
}
PUBLIC_API_END(hr);
return hr;
}
//---------------------------------------------------------------------------------------
// Gets the module pretty name (may be filename or faked up name)
//
// Arguments:
// cchName - count of characters in the szName buffer on input.
// *pcchName - Optional Out parameter, which gets set to the fully requested size
// (not just how many characters are written).
// szName - buffer to get name.
//
// Returns:
// S_OK on success.
// S_FALSE if we fabricate the name.
// Return failing HR (on common errors) or Throw on exceptional errors.
//
// Note:
// Filename isn't necessarily the same as the module name in the metadata.
//
HRESULT CordbModule::GetNameWorker(ULONG32 cchName, ULONG32 *pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
CONTRACTL
{
THROWS;
}
CONTRACTL_END;
HRESULT hr = S_OK;
const WCHAR * szTempName = NULL;
ALLOW_DATATARGET_MISSING_MEMORY(
szTempName = GetModulePath();
);
#if defined(FEATURE_DBGIPC_TRANSPORT_DI)
// To support VS when debugging remotely we act like the Compact Framework and return the assembly name
// when asked for the name of an in-memory module.
if (szTempName == NULL)
{
IMetaDataAssemblyImport *pAssemblyImport = NULL;
if (SUCCEEDED(hr = GetMetaDataImporter()->QueryInterface(IID_IMetaDataAssemblyImport, (void**)&pAssemblyImport)))
{
mdAssembly mda = TokenFromRid(1, mdtAssembly);
hr = pAssemblyImport->GetAssemblyProps(mda, // [IN] The Assembly for which to get the properties.
NULL, // [OUT] Pointer to the Originator blob.
NULL, // [OUT] Count of bytes in the Originator Blob.
NULL, // [OUT] Hash Algorithm.
szName, // [OUT] Buffer to fill with name.
cchName, // [IN] Size of buffer in wide chars.
(ULONG*)pcchName, // [OUT] Actual # of wide chars in name.
NULL, // [OUT] Assembly MetaData.
NULL); // [OUT] Flags.
pAssemblyImport->Release();
return hr;
}
// reset hr
hr = S_OK;
}
#endif // FEATURE_DBGIPC_TRANSPORT_DI
EX_TRY_ALLOW_DATATARGET_MISSING_MEMORY
{
StringCopyHolder buffer;
// If the module has no file name, then we'll fabricate a fake name
if (!szTempName)
{
// On MiniDumpNormal, if the debugger can't find the module then there's no way we will
// find metadata.
hr = HRESULT_FROM_WIN32(ERROR_PARTIAL_COPY);
// Tempting to use the metadata-scope name, but that's a regression from Whidbey. For manifest modules,
// the metadata scope name is not initialized with the string the user supplied to create the
// dynamic assembly. So we call into the runtime to use CLR heuristics to get a more accurate name.
m_pProcess->GetDAC()->GetModuleSimpleName(m_vmModule, &buffer);
_ASSERTE(buffer.IsSet());
szTempName = buffer;
// Note that we considered returning S_FALSE for fabricated names like this, but that's a breaking
            // change from Whidbey that is known to trigger bugs in VS. If a debugger wants to differentiate
// real path names from fake simple names, we'll just have to add a new API with the right semantics.
}
hr = CopyOutString(szTempName, cchName, pcchName, szName);
}
EX_END_CATCH_ALLOW_DATATARGET_MISSING_MEMORY
return hr;
}
//---------------------------------------------------------------------------------------
// Gets actual name of loaded module. (no faked names)
//
// Returns:
// string for full path to module name. This is a file that can be opened.
// NULL if name is not available (such as in some dynamic module cases)
// Throws if failed accessing target
//
// Notes:
// We avoid using the method name "GetModuleFileName" because winbase.h #defines that
// token (along with many others) to have an A or W suffix.
const WCHAR * CordbModule::GetModulePath()
{
// Lazily initialize. Module filenames cannot change, and so once
// we've retrieved this successfully, it's stored for good.
if (!m_strModulePath.IsSet())
{
IDacDbiInterface * pDac = m_pProcess->GetDAC(); // throws
pDac->GetModulePath(m_vmModule, &m_strModulePath); // throws
_ASSERTE(m_strModulePath.IsSet());
}
if (m_strModulePath.IsEmpty())
{
return NULL; // module has no filename
}
return m_strModulePath;
}
//---------------------------------------------------------------------------------------
// Get and caches ngen image path.
//
// Returns:
// Null-terminated string to ngen image path.
// NULL if there is no ngen filename (eg, file is not ngenned).
// Throws on error (such as inability to read the path from the target).
//
// Notes:
// This can be used to get the path to find metadata. For ngenned images,
// the IL (and associated metadata) may not be loaded, so we may want to get the
// metadata out of the ngen image.
const WCHAR * CordbModule::GetNGenImagePath()
{
HRESULT hr = S_OK;
EX_TRY
{
// Lazily initialize. Module filenames cannot change, and so once
// we've retrieved this successfully, it's stored for good.
if (!m_strNGenImagePath.IsSet())
{
IDacDbiInterface * pDac = m_pProcess->GetDAC(); // throws
BOOL fNonEmpty = pDac->GetModuleNGenPath(m_vmModule, &m_strNGenImagePath); // throws
(void)fNonEmpty; //prevent "unused variable" error from GCC
_ASSERTE(m_strNGenImagePath.IsSet() && (m_strNGenImagePath.IsEmpty() == !fNonEmpty));
}
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr) ||
m_strNGenImagePath == NULL ||
m_strNGenImagePath.IsEmpty())
{
return NULL; // module has no ngen filename
}
return m_strNGenImagePath;
}
// Implementation of ICorDebugModule::EnableJITDebugging
// See also code:CordbModule::SetJITCompilerFlags
HRESULT CordbModule::EnableJITDebugging(BOOL bTrackJITInfo, BOOL bAllowJitOpts)
{
// Leftside will enforce that this is a valid time to change jit flags.
// V1.0 behavior allowed setting these in the middle of a module's lifetime, which meant
// that different methods throughout the module may have been jitted differently.
// Since V2, this has to be set when the module is first loaded, before anything is jitted.
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
DWORD dwFlags = CORDEBUG_JIT_DEFAULT;
// Since V2, bTrackJITInfo is the default and cannot be turned off.
if (!bAllowJitOpts)
{
dwFlags |= CORDEBUG_JIT_DISABLE_OPTIMIZATION;
}
return SetJITCompilerFlags(dwFlags);
}
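// Public implementation of ICorDebugModule::EnableClassLoadCallbacks.
// Turns class-load callbacks for this module on or off by sending an event to the left side.
// Class-load callbacks can never be turned off for dynamic modules.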
HRESULT CordbModule::EnableClassLoadCallbacks(BOOL bClassLoadCallbacks)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_ALLOW_LIVE_DO_STOPGO(GetProcess());
// You must receive ClassLoad callbacks for dynamic modules so that we can keep the metadata up-to-date on the Right
// Side. Therefore, we refuse to turn them off for all dynamic modules (they were forced on when the module was
// loaded on the Left Side.)
if (m_fDynamic && !bClassLoadCallbacks)
return E_INVALIDARG;
if (m_vmDomainAssembly.IsNull())
return E_UNEXPECTED;
// Send a Set Class Load Flag event to the left side. There is no need to wait for a response, and this can be
// called whether or not the process is synchronized.
CordbProcess *pProcess = GetProcess();
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event,
DB_IPCE_SET_CLASS_LOAD_FLAG,
false,
(GetAppDomain()->GetADToken()));
event.SetClassLoad.vmDomainAssembly = this->m_vmDomainAssembly;
event.SetClassLoad.flag = (bClassLoadCallbacks == TRUE);
HRESULT hr = pProcess->m_cordb->SendIPCEvent(pProcess, &event,
sizeof(DebuggerIPCEvent));
hr = WORST_HR(hr, event.hr);
return hr;
}
//-----------------------------------------------------------------------------
// Public implementation of ICorDebugModule::GetFunctionFromToken
// Get the CordbFunction matches this token / module pair.
// Each time a function is Enc-ed, it gets its own CordbFunction object.
// This will return the latest EnC version of the function for this Module,Token pair.
HRESULT CordbModule::GetFunctionFromToken(mdMethodDef token,
ICorDebugFunction **ppFunction)
{
// This is not reentrant. DBI should call code:CordbModule::LookupOrCreateFunctionLatestVersion instead.
PUBLIC_API_ENTRY(this);
ATT_ALLOW_LIVE_DO_STOPGO(GetProcess()); // @todo - can this be RequiredStop?
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);
HRESULT hr = S_OK;
EX_TRY
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// Check token is valid.
if ((token == mdMethodDefNil) ||
(TypeFromToken(token) != mdtMethodDef) ||
(!GetMetaDataImporter()->IsValidToken(token)))
{
ThrowHR(E_INVALIDARG);
}
CordbFunction * pFunction = LookupOrCreateFunctionLatestVersion(token);
*ppFunction = static_cast<ICorDebugFunction*> (pFunction);
pFunction->ExternalAddRef();
}
EX_CATCH_HRESULT(hr);
return hr;
}
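// Public implementation of ICorDebugModule::GetFunctionFromRVA.
// Looking up a function by RVA is not supported; this always returns E_NOTIMPL.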
HRESULT CordbModule::GetFunctionFromRVA(CORDB_ADDRESS rva,
ICorDebugFunction **ppFunction)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);
return E_NOTIMPL;
}
HRESULT CordbModule::LookupClassByToken(mdTypeDef token,
CordbClass **ppClass)
{
INTERNAL_API_ENTRY(this->GetProcess()); //
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
EX_TRY // @dbgtodo exceptions - push this up
{
*ppClass = NULL;
if ((token == mdTypeDefNil) || (TypeFromToken(token) != mdtTypeDef))
{
ThrowHR(E_INVALIDARG);
}
RSLockHolder lockHolder(GetProcess()->GetProcessLock()); // @dbgtodo synchronization - Push this up
CordbClass *pClass = m_classes.GetBase(token);
if (pClass == NULL)
{
// Validate the token.
if (!GetMetaDataImporter()->IsValidToken(token))
{
ThrowHR(E_INVALIDARG);
}
RSInitHolder<CordbClass> pClassInit(new CordbClass(this, token));
pClass = pClassInit.TransferOwnershipToHash(&m_classes);
}
*ppClass = pClass;
}
EX_CATCH_HRESULT(hr);
return hr;
}
HRESULT CordbModule::GetClassFromToken(mdTypeDef token,
ICorDebugClass **ppClass)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_ALLOW_LIVE_DO_STOPGO(this->GetProcess()); // @todo - could this be RequiredStopped?
VALIDATE_POINTER_TO_OBJECT(ppClass, ICorDebugClass **);
HRESULT hr = S_OK;
EX_TRY
{
CordbClass *pClass = NULL;
*ppClass = NULL;
// Validate the token.
if (!GetMetaDataImporter()->IsValidToken(token))
{
ThrowHR(E_INVALIDARG);
}
hr = LookupClassByToken(token, &pClass);
IfFailThrow(hr);
*ppClass = static_cast<ICorDebugClass*> (pClass);
pClass->ExternalAddRef();
}
EX_CATCH_HRESULT(hr);
return hr;
}
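// Public implementation of ICorDebugModule::CreateBreakpoint.
// Module-level breakpoints are not supported; this always returns E_NOTIMPL.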
HRESULT CordbModule::CreateBreakpoint(ICorDebugModuleBreakpoint **ppBreakpoint)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppBreakpoint, ICorDebugModuleBreakpoint **);
return E_NOTIMPL;
}
//
// Return the token for the Module table entry for this object. The token
// may then be passed to the meta data import api's.
//
HRESULT CordbModule::GetToken(mdModule *pToken)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pToken, mdModule *);
HRESULT hr = S_OK;
EX_TRY
{
hr = GetMetaDataImporter()->GetModuleFromScope(pToken);
IfFailThrow(hr);
}
EX_CATCH_HRESULT(hr);
return hr;
}
// public implementation for ICorDebugModule::GetMetaDataInterface
// Return a meta data interface pointer that can be used to examine the
// meta data for this module.
HRESULT CordbModule::GetMetaDataInterface(REFIID riid, IUnknown **ppObj)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppObj, IUnknown **);
HRESULT hr = S_OK;
EX_TRY
{
// QI the importer that we already have and return the result.
hr = GetMetaDataImporter()->QueryInterface(riid, (void**)ppObj);
IfFailThrow(hr);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//-----------------------------------------------------------------------------
// LookupFunctionLatestVersion finds the latest cached version of an existing CordbFunction
// in the given module. If the function doesn't exist, it returns NULL.
//
// Arguments:
// funcMetaDataToken - methoddef token for function to lookup
//
//
// Notes:
// If no CordbFunction instance was cached, then this returns NULL.
// use code:CordbModule::LookupOrCreateFunctionLatestVersion to do a lookup that will
// populate the cache if needed.
CordbFunction* CordbModule::LookupFunctionLatestVersion(mdMethodDef funcMetaDataToken)
{
INTERNAL_API_ENTRY(this);
return m_functions.GetBase(funcMetaDataToken);
}
//-----------------------------------------------------------------------------
// Lookup (or create) the CordbFunction for the latest EnC version.
//
// Arguments:
// funcMetaDataToken - methoddef token for function to lookup
//
// Returns:
// CordbFunction instance for that token. This will create an instance if needed, and so never returns null.
// Throws on critical error.
//
// Notes:
// This creates the latest EnC version. Use code:CordbModule::LookupOrCreateFunction to do an
// enc-version aware function lookup.
//
CordbFunction* CordbModule::LookupOrCreateFunctionLatestVersion(mdMethodDef funcMetaDataToken)
{
INTERNAL_API_ENTRY(this);
CordbFunction * pFunction = m_functions.GetBase(funcMetaDataToken);
if (pFunction != NULL)
{
return pFunction;
}
// EnC adds each version to the hash. So if the hash lookup fails, then it must not be an EnC case,
// and so we can use the default version number.
return CreateFunction(funcMetaDataToken, CorDB_DEFAULT_ENC_FUNCTION_VERSION);
}
//-----------------------------------------------------------------------------
// LookupOrCreateFunction finds an existing version of CordbFunction in the given module.
// If the function doesn't exist, it creates it.
//
// The outgoing function is not yet fully initialized. For eg, the Class field is not set.
// However, ICorDebugFunction::GetClass() will check that and lazily initialize the field.
//
// Throws on error.
//
CordbFunction * CordbModule::LookupOrCreateFunction(mdMethodDef funcMetaDataToken, SIZE_T enCVersion)
{
INTERNAL_API_ENTRY(this);
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
CordbFunction * pFunction = m_functions.GetBase(funcMetaDataToken);
    // Special-case non-existence since we also need to add the new instance to the hash table.
if (pFunction == NULL)
{
// EnC adds each version to the hash. So if the hash lookup fails,
// then it must not be an EnC case.
return CreateFunction(funcMetaDataToken, enCVersion);
}
// linked list sorted with most recent version at front. Version numbers correspond
    // to the actual edit count against the module, so version numbers are not necessarily contiguous.
// Any valid EnC version must already exist as we would have created it on the ApplyChanges
for (CordbFunction *pf=pFunction; pf != NULL; pf = pf->GetPrevVersion())
{
if (pf->GetEnCVersionNumber() == enCVersion)
{
return pf;
}
}
_ASSERTE(!"Couldn't find EnC version of function\n");
ThrowHR(E_FAIL);
}
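// Public implementation of ICorDebugModule::IsDynamic.
// Returns TRUE if this is a dynamic (eg, Reflection.Emit) module.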
HRESULT CordbModule::IsDynamic(BOOL *pDynamic)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pDynamic, BOOL *);
(*pDynamic) = m_fDynamic;
return S_OK;
}
BOOL CordbModule::IsDynamic()
{
return m_fDynamic;
}
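// Public implementation of ICorDebugModule::IsInMemory.
// Returns TRUE if this module was loaded from a memory buffer rather than mapped from a file on disk.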
HRESULT CordbModule::IsInMemory(BOOL *pInMemory)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pInMemory, BOOL *);
(*pInMemory) = m_fInMemory;
return S_OK;
}
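// Public implementation of ICorDebugModule::GetGlobalVariableValue.
// Gets the value of a module-level (global) static field. Global statics live on the special
// global class (COR_GLOBAL_PARENT_TOKEN), which is looked up lazily and cached in m_pClass.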
HRESULT CordbModule::GetGlobalVariableValue(mdFieldDef fieldDef,
ICorDebugValue **ppValue)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppValue, ICorDebugValue **);
ATT_REQUIRE_STOPPED_MAY_FAIL(this->GetProcess());
HRESULT hr = S_OK;
EX_TRY
{
if (m_pClass == NULL)
{
CordbClass * pGlobalClass = NULL;
hr = LookupClassByToken(COR_GLOBAL_PARENT_TOKEN, &pGlobalClass);
IfFailThrow(hr);
m_pClass.Assign(pGlobalClass);
_ASSERTE(m_pClass != NULL);
}
hr = m_pClass->GetStaticFieldValue(fieldDef, NULL, ppValue);
IfFailThrow(hr);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//
// CreateFunction creates a new function from the given information and
// adds it to the module.
//
CordbFunction * CordbModule::CreateFunction(mdMethodDef funcMetaDataToken, SIZE_T enCVersion)
{
INTERNAL_API_ENTRY(this);
// In EnC cases, the token may not yet be valid. We may be caching the CordbFunction
// for a token for an added method before the metadata is updated on the RS.
// We rely that our caller has done token validation.
// Create a new CordbFunction object or throw.
RSInitHolder<CordbFunction> pFunction(new CordbFunction(this, funcMetaDataToken, enCVersion)); // throws
CordbFunction * pCopy = pFunction.TransferOwnershipToHash(&m_functions);
return pCopy;
}
#ifdef EnC_SUPPORTED
//---------------------------------------------------------------------------------------
//
// Creates a new CordbFunction object to represent this new version of a function and
// updates the module's function collection to mark this as the latest version.
//
// Arguments:
// funcMetaDataToken - the functions methodDef token in this module
// enCVerison - The new version number of this function
// ppFunction - Output param for the new instance - optional
//
// Assumptions:
// Assumes the specified version of this function doesn't already exist (i.e. enCVersion
// is newer than all existing versions).
//
HRESULT CordbModule::UpdateFunction(mdMethodDef funcMetaDataToken,
SIZE_T enCVersion,
CordbFunction** ppFunction)
{
INTERNAL_API_ENTRY(this);
if (ppFunction)
*ppFunction = NULL;
_ASSERTE(funcMetaDataToken);
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// pOldVersion is the 2nd newest version
CordbFunction* pOldVersion = LookupFunctionLatestVersion(funcMetaDataToken);
    // If we don't have an old version, create a default-versioned one, since we will most likely
    // go looking for it later and it's easier to put it in now than to insert it after the fact.
if (!pOldVersion)
{
LOG((LF_ENC, LL_INFO10000, "CM::UF: adding %8.8x with version %d\n", funcMetaDataToken, enCVersion));
HRESULT hr = S_OK;
EX_TRY
{
pOldVersion = CreateFunction(funcMetaDataToken, CorDB_DEFAULT_ENC_FUNCTION_VERSION);
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
return hr;
}
}
// This method should not be called for versions that already exist
_ASSERTE( enCVersion > pOldVersion->GetEnCVersionNumber());
LOG((LF_ENC, LL_INFO10000, "CM::UF: updating %8.8x with version %d\n", funcMetaDataToken, enCVersion));
// Create a new function object.
CordbFunction * pNewVersion = new (nothrow) CordbFunction(this, funcMetaDataToken, enCVersion);
if (pNewVersion == NULL)
return E_OUTOFMEMORY;
// Chain the 2nd most recent version onto this instance (this will internal addref).
pNewVersion->SetPrevVersion(pOldVersion);
// Add the function to the Module's hash of all functions.
HRESULT hr = m_functions.SwapBase(pOldVersion, pNewVersion);
if (FAILED(hr))
{
delete pNewVersion;
return hr;
}
// Do cleanup for function which is no longer the latest version
pNewVersion->GetPrevVersion()->MakeOld();
if (ppFunction)
*ppFunction = pNewVersion;
return hr;
}
#endif // EnC_SUPPORTED
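// Find the CordbClass for the given typedef token, creating and caching a new instance
// if one does not exist yet. Returns a failing HRESULT if creation fails.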
HRESULT CordbModule::LookupOrCreateClass(mdTypeDef classMetaDataToken,CordbClass** ppClass)
{
INTERNAL_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
RSLockHolder lockHolder(GetProcess()->GetProcessLock()); // @dbgtodo exceptions synchronization-
// Push this lock up, convert to exceptions.
HRESULT hr = S_OK;
*ppClass = LookupClass(classMetaDataToken);
if (*ppClass == NULL)
{
hr = CreateClass(classMetaDataToken,ppClass);
if (!SUCCEEDED(hr))
{
return hr;
}
_ASSERTE(*ppClass != NULL);
}
return hr;
}
//
// LookupClass finds an existing CordbClass in the given module.
// If the class doesn't exist, it returns NULL.
//
CordbClass* CordbModule::LookupClass(mdTypeDef classMetaDataToken)
{
INTERNAL_API_ENTRY(this);
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
return m_classes.GetBase(classMetaDataToken);
}
//
// CreateClass creates a new class from the given information and
// adds it to the module.
//
HRESULT CordbModule::CreateClass(mdTypeDef classMetaDataToken,
CordbClass** ppClass)
{
INTERNAL_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
_ASSERTE(GetProcess()->ThreadHoldsProcessLock());
CordbClass* pClass = new (nothrow) CordbClass(this, classMetaDataToken);
if (pClass == NULL)
return E_OUTOFMEMORY;
HRESULT hr = m_classes.AddBase(pClass);
if (SUCCEEDED(hr))
{
*ppClass = pClass;
if (classMetaDataToken == COR_GLOBAL_PARENT_TOKEN)
{
_ASSERTE( m_pClass == NULL ); //redundant create
m_pClass.Assign(pClass);
}
}
else
{
delete pClass;
}
return hr;
}
// Resolve a type-ref from this module to a CordbClass
//
// Arguments:
// token - a Type Ref in this module's scope.
// ppClass - out parameter to get the class we resolve to.
//
// Returns:
// S_OK on success.
//    CORDBG_E_CLASS_NOT_LOADED if the TypeRef is not yet resolved because the type it refers
//    to is not yet loaded.
//
// Notes:
// In general, a TypeRef refers to a type in another module. (Although as a corner case, it could
// refer to this module too). This resolves a TypeRef within the current module's scope to a
// (TypeDef, metadata scope), which is in turn encapsulated as a CordbClass.
//
// A TypeRef has a resolution scope (ModuleRef or AssemblyRef) and string name for the type
// within that scope. Resolving means:
// 1. Determining the actual metadata scope loaded for the resolution scope.
// See also code:CordbModule::ResolveAssemblyInternal
// If the resolved module hasn't been loaded yet, the resolution will fail.
// 2. Doing a string lookup of the TypeRef's name within that resolved scope to find the TypeDef.
// 3. Returning the (resolved scope, TypeDef) pair.
//
HRESULT CordbModule::ResolveTypeRef(mdTypeRef token, CordbClass **ppClass)
{
FAIL_IF_NEUTERED(this);
INTERNAL_SYNC_API_ENTRY(GetProcess()); //
CordbProcess * pProcess = GetProcess();
_ASSERTE((pProcess->GetShim() == NULL) || pProcess->GetSynchronized());
if ((token == mdTypeRefNil) || (TypeFromToken(token) != mdtTypeRef))
{
return E_INVALIDARG;
}
if (m_vmDomainAssembly.IsNull() || m_pAppDomain == NULL)
{
return E_UNEXPECTED;
}
HRESULT hr = S_OK;
*ppClass = NULL;
EX_TRY
{
TypeRefData inData = {m_vmDomainAssembly, token};
TypeRefData outData;
{
RSLockHolder lockHolder(pProcess->GetProcessLock());
pProcess->GetDAC()->ResolveTypeReference(&inData, &outData);
}
CordbModule * pModule = m_pAppDomain->LookupOrCreateModule(outData.vmDomainAssembly);
IfFailThrow(pModule->LookupClassByToken(outData.typeToken, ppClass));
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbModule::ResolveTypeRef
// Resolve a type ref or def to a CordbClass
//
// Arguments:
// token - a mdTypeDef or mdTypeRef in this module's scope to be resolved
// ppClass - out parameter to get the CordbClass for this type
//
// Notes:
// See code:CordbModule::ResolveTypeRef for more details.
HRESULT CordbModule::ResolveTypeRefOrDef(mdToken token, CordbClass **ppClass)
{
FAIL_IF_NEUTERED(this);
INTERNAL_SYNC_API_ENTRY(this->GetProcess()); //
if ((token == mdTypeRefNil) ||
(TypeFromToken(token) != mdtTypeRef && TypeFromToken(token) != mdtTypeDef))
return E_INVALIDARG;
if (TypeFromToken(token)==mdtTypeRef)
{
// It's a type-ref. That means the type is defined in another module.
// That other module is determined at runtime by Fusion / Loader policy. So we need to
// ultimately ask the runtime which module was actually loaded.
return ( ResolveTypeRef(token, ppClass) );
}
else
{
// It's a type-def. This is the easy case because the type is defined in this same module.
return ( LookupClassByToken(token, ppClass) );
}
}
//
// GetSize returns the size of the module.
//
HRESULT CordbModule::GetSize(ULONG32 *pcBytes)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pcBytes, ULONG32 *);
*pcBytes = m_PEBuffer.cbSize;
return S_OK;
}
CordbAssembly *CordbModule::GetCordbAssembly()
{
INTERNAL_API_ENTRY(this);
return m_pAssembly;
}
// This is legacy from the aborted V1 EnC attempt - not used in V2 EnC support
HRESULT CordbModule::GetEditAndContinueSnapshot(
ICorDebugEditAndContinueSnapshot **ppEditAndContinueSnapshot)
{
return E_NOTIMPL;
}
//---------------------------------------------------------------------------------------
//
// Requests that an edit be applied to the module for edit and continue and updates
// the right-side state and metadata.
//
// Arguments:
// cbMetaData - number of bytes in pbMetaData
// pbMetaData - a delta metadata blob describing the metadata edits to be made
// cbIL - number of bytes in pbIL
// pbIL - a new method body stream containing all of the method body information
// (IL, EH info, etc) for edited and added methods.
//
// Return Value:
// S_OK on success, various errors on failure
//
// Notes:
//
//
// This applies the same changes to the RS's copy of the metadata that the left-side will apply to
// its copy of the metadata. See code:EditAndContinueModule::ApplyEditAndContinue
//
HRESULT CordbModule::ApplyChanges(ULONG cbMetaData,
BYTE pbMetaData[],
ULONG cbIL,
BYTE pbIL[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
#ifdef FEATURE_ENC_SUPPORTED
// We enable EnC back in code:CordbModule::SetJITCompilerFlags.
// If EnC isn't enabled, then we'll fail in the LS when we try to ApplyChanges.
// We'd expect a well-behaved debugger to never actually land here.
    LOG((LF_CORDB,LL_INFO10000, "CM::AC: applying changes"));
VALIDATE_POINTER_TO_OBJECT_ARRAY(pbMetaData,
BYTE,
cbMetaData,
true,
true);
VALIDATE_POINTER_TO_OBJECT_ARRAY(pbIL,
BYTE,
cbIL,
true,
true);
HRESULT hr;
RSExtSmartPtr<IUnknown> pUnk;
RSExtSmartPtr<IMDInternalImport> pMDImport;
RSExtSmartPtr<IMDInternalImport> pMDImport2;
//
// Edit was successful - update the right-side state to reflect the edit
//
++m_EnCCount;
// apply the changes to our copy of the metadata
_ASSERTE(m_pIMImport != NULL); // must have metadata at this point in EnC
IfFailGo(m_pIMImport->QueryInterface(IID_IUnknown, (void**)&pUnk));
IfFailGo(GetMDInternalInterfaceFromPublic(pUnk, IID_IMDInternalImport,
(void **)&pMDImport));
// The left-side will call this same method on its copy of the metadata.
hr = pMDImport->ApplyEditAndContinue(pbMetaData, cbMetaData, &pMDImport2);
if (pMDImport2 != NULL)
{
// ApplyEditAndContinue() expects IMDInternalImport**, but we give it RSExtSmartPtr<IMDInternalImport>
// Silent cast of RSExtSmartPtr to IMDInternalImport* leads to assignment of a raw pointer
// without calling AddRef(), thus we need to do it manually.
// @todo - ApplyEditAndContinue should probably AddRef the out parameter.
pMDImport2->AddRef();
}
IfFailGo(hr);
// We're about to get a new importer object, so release the old one.
m_pIMImport.Clear();
IfFailGo(GetMDPublicInterfaceFromInternal(pMDImport2, IID_IMetaDataImport, (void **)&m_pIMImport));
// set the new RVA value
// Send the delta over to the debugee and request that it apply the edit
IfFailGo( ApplyChangesInternal(cbMetaData, pbMetaData, cbIL, pbIL) );
EX_TRY
{
m_pInternalMetaDataImport.Clear();
UpdateInternalMetaData();
}
EX_CATCH_HRESULT(hr);
_ASSERTE(SUCCEEDED(hr));
ErrExit:
// MetaData interface pointers will be automatically released via SmartPtr dtors.
// @todo : prevent further execution of program
return hr;
#else
return E_NOTIMPL;
#endif
}
//---------------------------------------------------------------------------------------
//
// Requests that an edit be applied to the module for edit and continue and updates
// some right-side state, but does not update our copy of the metadata.
//
// Arguments:
// cbMetaData - number of bytes in pbMetaData
// pbMetaData - a delta metadata blob describing the metadata edits to be made
// cbIL - number of bytes in pbIL
// pbIL - a new method body stream containing all of the method body information
// (IL, EH info, etc) for edited and added methods.
//
// Return Value:
// S_OK on success, various errors on failure
//
HRESULT CordbModule::ApplyChangesInternal(ULONG cbMetaData,
BYTE pbMetaData[],
ULONG cbIL,
BYTE pbIL[])
{
CONTRACTL
{
NOTHROW;
}
CONTRACTL_END;
LOG((LF_ENC,LL_INFO100, "CordbProcess::ApplyChangesInternal\n"));
FAIL_IF_NEUTERED(this);
INTERNAL_SYNC_API_ENTRY(this->GetProcess()); //
if (m_vmDomainAssembly.IsNull())
return E_UNEXPECTED;
#ifdef FEATURE_ENC_SUPPORTED
HRESULT hr;
void * pRemoteBuf = NULL;
EX_TRY
{
// Create and initialize the event as synchronous
// We'll be sending a NULL appdomain pointer since the individual modules
// will contain pointers to their respective A.D.s
DebuggerIPCEvent event;
GetProcess()->InitIPCEvent(&event, DB_IPCE_APPLY_CHANGES, false, VMPTR_AppDomain::NullPtr());
event.ApplyChanges.vmDomainAssembly = this->m_vmDomainAssembly;
// Have the left-side create a buffer for us to store the delta into
ULONG cbSize = cbMetaData+cbIL;
TargetBuffer tbFull = GetProcess()->GetRemoteBuffer(cbSize);
pRemoteBuf = CORDB_ADDRESS_TO_PTR(tbFull.pAddress);
TargetBuffer tbMetaData = tbFull.SubBuffer(0, cbMetaData); // 1st half
TargetBuffer tbIL = tbFull.SubBuffer(cbMetaData); // 2nd half
// Copy the delta metadata over to the debugee
GetProcess()->SafeWriteBuffer(tbMetaData, pbMetaData); // throws
GetProcess()->SafeWriteBuffer(tbIL, pbIL); // throws
// Send a synchronous event requesting the debugee apply the edit
event.ApplyChanges.pDeltaMetadata = tbMetaData.pAddress;
event.ApplyChanges.cbDeltaMetadata = tbMetaData.cbSize;
event.ApplyChanges.pDeltaIL = tbIL.pAddress;
event.ApplyChanges.cbDeltaIL = tbIL.cbSize;
LOG((LF_ENC,LL_INFO100, "CordbProcess::ApplyChangesInternal sending event\n"));
hr = GetProcess()->SendIPCEvent(&event, sizeof(event));
hr = WORST_HR(hr, event.hr);
IfFailThrow(hr);
// Allocate space for the return event.
// We always copy over the whole buffer size, which is bigger than sizeof(DebuggerIPCEvent).
// This seems ugly: in this case we know the exact size of the event we want to read,
// so why copy over all the extra data?
DebuggerIPCEvent *retEvent = (DebuggerIPCEvent *) _alloca(CorDBIPC_BUFFER_SIZE);
{
//
// Wait for events to return from the RC. We expect zero or more add field,
// add function or update function events and one completion event.
//
while (TRUE)
{
hr = GetProcess()->m_cordb->WaitForIPCEventFromProcess(GetProcess(),
GetAppDomain(),
retEvent);
IfFailThrow(hr);
if (retEvent->type == DB_IPCE_APPLY_CHANGES_RESULT)
{
// Done receiving update events
hr = retEvent->ApplyChangesResult.hr;
LOG((LF_CORDB, LL_INFO1000, "[%x] RCET::DRCE: EnC apply changes result %8.8x.\n", GetCurrentThreadId(), hr));
break;
}
_ASSERTE(retEvent->type == DB_IPCE_ENC_UPDATE_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FIELD);
LOG((LF_CORDB, LL_INFO1000, "[%x] RCET::DRCE: EnC %s %8.8x to version %d.\n",
GetCurrentThreadId(),
retEvent->type == DB_IPCE_ENC_UPDATE_FUNCTION ? "Update function" :
retEvent->type == DB_IPCE_ENC_ADD_FUNCTION ? "Add function" : "Add field",
retEvent->EnCUpdate.memberMetadataToken, retEvent->EnCUpdate.newVersionNumber));
CordbAppDomain *pAppDomain = GetAppDomain();
_ASSERTE(NULL != pAppDomain);
CordbModule* pModule = NULL;
pModule = pAppDomain->LookupOrCreateModule(retEvent->EnCUpdate.vmDomainAssembly); // throws
_ASSERTE(pModule != NULL);
// update to the newest version
if (retEvent->type == DB_IPCE_ENC_UPDATE_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FUNCTION)
{
// Update the function collection to reflect this edit
hr = pModule->UpdateFunction(retEvent->EnCUpdate.memberMetadataToken, retEvent->EnCUpdate.newVersionNumber, NULL);
}
// mark the class and relevant type as old so we update it next time we try to query it
if (retEvent->type == DB_IPCE_ENC_ADD_FUNCTION ||
retEvent->type == DB_IPCE_ENC_ADD_FIELD)
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock()); // @dbgtodo synchronization - push this up
CordbClass* pClass = pModule->LookupClass(retEvent->EnCUpdate.classMetadataToken);
// if don't find class, that is fine because it hasn't been loaded yet so doesn't
// need to be updated
if (pClass)
{
pClass->MakeOld();
}
}
}
}
LOG((LF_ENC,LL_INFO100, "CordbProcess::ApplyChangesInternal complete.\n"));
}
EX_CATCH_HRESULT(hr);
// process may have gone away by the time we get here so don't assume is there.
CordbProcess *pProcess = GetProcess();
if (pProcess)
{
HRESULT hr2 = pProcess->ReleaseRemoteBuffer(&pRemoteBuf);
TESTANDRETURNHR(hr2);
}
return hr;
#else // FEATURE_ENC_SUPPORTED
return E_NOTIMPL;
#endif // FEATURE_ENC_SUPPORTED
}
// Set the JMC status for the entire module.
// All methods specified in others[] will have jmc status !fIsUserCode
// All other methods will have jmc status fIsUserCode.
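// Illustrative debugger-side sketch (assumes the caller already QI'd this
// module to ICorDebugModule2; pModule2 is hypothetical). Marking an entire
// module as non-user code for Just-My-Code stepping:
//
//     HRESULT hr = pModule2->SetJMCStatus(FALSE /* fIsUserCode */, 0, NULL);
//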
HRESULT CordbModule::SetJMCStatus(
BOOL fIsUserCode,
ULONG32 cOthers,
mdToken others[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
if (m_vmDomainAssembly.IsNull())
return E_UNEXPECTED;
// @todo -allow the other parameters. These are functions that have default status
// opposite of fIsUserCode.
if (cOthers != 0)
{
_ASSERTE(!"not yet impl for cOthers != 0");
return E_NOTIMPL;
}
// Send event to the LS.
CordbProcess* pProcess = this->GetProcess();
_ASSERTE(pProcess != NULL);
// Tell the LS that this module is/is not user code
DebuggerIPCEvent event;
pProcess->InitIPCEvent(&event, DB_IPCE_SET_MODULE_JMC_STATUS, true, this->GetAppDomain()->GetADToken());
event.SetJMCFunctionStatus.vmDomainAssembly = m_vmDomainAssembly;
event.SetJMCFunctionStatus.dwStatus = fIsUserCode;
// Note: two-way event here...
HRESULT hr = pProcess->m_cordb->SendIPCEvent(pProcess, &event, sizeof(DebuggerIPCEvent));
// Stop now if we can't even send the event.
if (!SUCCEEDED(hr))
{
LOG((LF_CORDB, LL_INFO10, "CordbModule::SetJMCStatus failed 0x%08x...\n", hr));
return hr;
}
_ASSERTE(event.type == DB_IPCE_SET_MODULE_JMC_STATUS_RESULT);
LOG((LF_CORDB, LL_INFO10, "returning from CordbModule::SetJMCStatus 0x%08x...\n", hr));
return event.hr;
}
//
// Resolve an assembly given an AssemblyRef token. Note that
// this will not trigger the loading of the assembly. If the assembly is not yet loaded,
// this will return a CORDBG_E_CANNOT_RESOLVE_ASSEMBLY error.
//
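// Illustrative debugger-side sketch (pModule and tkAssemblyRef are
// hypothetical): the not-yet-loaded case is an expected outcome, not a hard
// failure.
//
//     ICorDebugAssembly * pAssembly = NULL;
//     HRESULT hr = pModule->ResolveAssembly(tkAssemblyRef, &pAssembly);
//     if (hr == CORDBG_E_CANNOT_RESOLVE_ASSEMBLY)
//     {
//         // Loader policy hasn't bound this reference yet; retry later.
//     }
//     else if (SUCCEEDED(hr))
//     {
//         // ... use pAssembly, then pAssembly->Release();
//     }
//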
HRESULT CordbModule::ResolveAssembly(mdToken tkAssemblyRef,
ICorDebugAssembly **ppAssembly)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(this->GetProcess());
if(ppAssembly)
{
*ppAssembly = NULL;
}
HRESULT hr = S_OK;
EX_TRY
{
CordbAssembly *pCordbAsm = ResolveAssemblyInternal(tkAssemblyRef);
if (pCordbAsm == NULL)
{
// Don't throw here. It's a common-case failure path and not exceptional.
hr = CORDBG_E_CANNOT_RESOLVE_ASSEMBLY;
}
else if(ppAssembly)
{
_ASSERTE(pCordbAsm != NULL);
*ppAssembly = pCordbAsm;
pCordbAsm->ExternalAddRef();
}
}
EX_CATCH_HRESULT(hr);
return hr;
}
//---------------------------------------------------------------------------------------
// Worker to resolve an assembly ref.
//
// Arguments:
// tkAssemblyRef - token of assembly ref to resolve
//
// Returns:
// Assembly that this token resolves to.
// NULL if it's a valid token but the assembly has not yet been resolved.
// (This is a non-exceptional error case).
//
// Notes:
// MetaData has tokens to represent a reference to another assembly.
// But Loader/Fusion policy ultimately decides which specific assembly is actually loaded
// for that token.
// This does the lookup of actual assembly and reports back to the debugger.
CordbAssembly * CordbModule::ResolveAssemblyInternal(mdToken tkAssemblyRef)
{
INTERNAL_SYNC_API_ENTRY(GetProcess()); //
if (TypeFromToken(tkAssemblyRef) != mdtAssemblyRef || tkAssemblyRef == mdAssemblyRefNil)
{
// Not a valid token
ThrowHR(E_INVALIDARG);
}
CordbAssembly * pAssembly = NULL;
if (!m_vmDomainAssembly.IsNull())
{
// Get DAC to do the real work to resolve the assembly
VMPTR_DomainAssembly vmDomainAssembly = GetProcess()->GetDAC()->ResolveAssembly(m_vmDomainAssembly, tkAssemblyRef);
// now find the ICorDebugAssembly corresponding to it
if (!vmDomainAssembly.IsNull() && m_pAppDomain != NULL)
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// Don't throw here because if the lookup fails, we want to throw CORDBG_E_CANNOT_RESOLVE_ASSEMBLY.
pAssembly = m_pAppDomain->LookupOrCreateAssembly(vmDomainAssembly);
}
}
return pAssembly;
}
//
// CreateReaderForInMemorySymbols - create an ISymUnmanagedReader object for symbols
// which are loaded into memory in the CLR. See interface definition in cordebug.idl for
// details.
//
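// Illustrative debugger-side sketch (pModule3 is a hypothetical
// ICorDebugModule3 obtained via QueryInterface): requesting a reader for a
// dynamic module's in-memory symbols.
//
//     ISymUnmanagedReader * pSymReader = NULL;
//     HRESULT hr = pModule3->CreateReaderForInMemorySymbols(IID_ISymUnmanagedReader,
//                                                           (void **)&pSymReader);
//     if (hr == CORDBG_E_SYMBOLS_NOT_AVAILABLE)
//     {
//         // Expected for dynamic modules before the first class is loaded.
//     }
//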
HRESULT CordbModule::CreateReaderForInMemorySymbols(REFIID riid, void** ppObj)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
HRESULT hr = S_OK;
EX_TRY
{
// Get the symbol memory in a stream to give to the reader.
ReleaseHolder<IStream> pStream;
IDacDbiInterface::SymbolFormat symFormat = GetInMemorySymbolStream(&pStream);
// First create the symbol binder corresponding to the format of the stream
ReleaseHolder<ISymUnmanagedBinder> pBinder;
if (symFormat == IDacDbiInterface::kSymbolFormatPDB)
{
#ifndef TARGET_UNIX
// PDB format - use diasymreader.dll with COM activation
InlineSString<_MAX_PATH> ssBuf;
IfFailThrow(GetClrModuleDirectory(ssBuf));
IfFailThrow(FakeCoCreateInstanceEx(CLSID_CorSymBinder_SxS,
ssBuf.GetUnicode(),
IID_ISymUnmanagedBinder,
(void**)&pBinder,
NULL));
#else
IfFailThrow(FakeCoCreateInstance(CLSID_CorSymBinder_SxS,
IID_ISymUnmanagedBinder,
(void**)&pBinder));
#endif
}
else
{
// No in-memory symbols, return the appropriate error
_ASSERTE(symFormat == IDacDbiInterface::kSymbolFormatNone);
if (m_fDynamic || m_fInMemory)
{
// This is indeed an in-memory or dynamic module, we just don't have any symbols for it.
// This means the application didn't supply any, or they are not yet available. Symbols
// first become available at LoadClass time for dynamic modules and UpdateModuleSymbols
// time for non-dynamic in-memory modules.
ThrowHR(CORDBG_E_SYMBOLS_NOT_AVAILABLE);
}
// This module is on disk - the debugger should use its normal symbol-loading logic.
ThrowHR(CORDBG_E_MODULE_LOADED_FROM_DISK);
}
// In the attach or dump case, if we attach or take the dump after we have defined a dynamic module, we may
// have already set the symbol format to "PDB" by the time we call CreateReaderForInMemorySymbols during initialization
// for loaded modules. (In the launch case, we do this initialization when the module is actually loaded, and before we
// set the symbol format.) When we call CreateReaderForInMemorySymbols, we can't assume the initialization was already
// performed or specifically, that we already have m_pIMImport initialized. We can't call into diasymreader with a NULL
// pointer as the value for m_pIMImport, so we need to check that here.
if (m_pIMImport == NULL)
{
ThrowHR(CORDBG_E_SYMBOLS_NOT_AVAILABLE);
}
// Now create the symbol reader from the data
ReleaseHolder<ISymUnmanagedReader> pReader;
IfFailThrow(pBinder->GetReaderFromStream(m_pIMImport, pStream, &pReader));
// Attempt to return the interface requested
// Note that this does an AddRef for our return value ppObj, so we don't suppress the release
// of the pReader holder.
IfFailThrow(pReader->QueryInterface(riid, ppObj));
}
EX_CATCH_HRESULT(hr);
return hr;
}
/* ------------------------------------------------------------------------- *
* Class class
* ------------------------------------------------------------------------- */
//---------------------------------------------------------------------------------------
// Set the continue counter that marks when the module is in its Load event
//
// Notes:
// Jit flags can only be changed in the real module Load event. We may
// have multiple module load events on different threads coming at the
// same time. So each module load tracks its continue counter.
//
// This can be used by code:CordbModule::EnsureModuleIsInLoadCallback to
// properly return CORDBG_E_MUST_BE_IN_LOAD_MODULE
void CordbModule::SetLoadEventContinueMarker()
{
// Well behaved targets should only set this once.
GetProcess()->TargetConsistencyCheck(m_nLoadEventContinueCounter == 0);
m_nLoadEventContinueCounter = GetProcess()->m_continueCounter;
}
//---------------------------------------------------------------------------------------
// Return CORDBG_E_MUST_BE_IN_LOAD_MODULE if the module is not in the load module callback.
//
// Notes:
// The comparison is done via continue counters. The counter of the load
// event is cached via code:CordbModule::SetLoadEventContinueMarker.
//
// This state is currently stored on the RS. Alternatively, it could likely be retrieved from the LS state as
// well. One disadvantage of the current model is that if we detach during the load-module callback and
// then reattach, the RS state is flushed and we lose the fact that we can toggle the jit flags.
HRESULT CordbModule::EnsureModuleIsInLoadCallback()
{
if (this->m_nLoadEventContinueCounter < GetProcess()->m_continueCounter)
{
return CORDBG_E_MUST_BE_IN_LOAD_MODULE;
}
else
{
return S_OK;
}
}
// Implementation of ICorDebugModule2::SetJITCompilerFlags
// See also code:CordbModule::EnableJITDebugging
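// Illustrative debugger-side sketch (pModule2 is a hypothetical ICorDebugModule2
// obtained inside the LoadModule callback): the flags must be set before any
// code in the module is jitted.
//
//     HRESULT hr = pModule2->SetJITCompilerFlags(CORDEBUG_JIT_DISABLE_OPTIMIZATION);
//     // Returns CORDBG_E_MUST_BE_IN_LOAD_MODULE when called at any other time.
//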
HRESULT CordbModule::SetJITCompilerFlags(DWORD dwFlags)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
HRESULT hr = S_OK;
EX_TRY
{
// can't have a subset of these, eg 0x101, so make sure we have an exact match
if ((dwFlags != CORDEBUG_JIT_DEFAULT) &&
(dwFlags != CORDEBUG_JIT_DISABLE_OPTIMIZATION) &&
(dwFlags != CORDEBUG_JIT_ENABLE_ENC))
{
hr = E_INVALIDARG;
}
else
{
BOOL fAllowJitOpts = ((dwFlags & CORDEBUG_JIT_DISABLE_OPTIMIZATION) != CORDEBUG_JIT_DISABLE_OPTIMIZATION);
BOOL fEnableEnC = ((dwFlags & CORDEBUG_JIT_ENABLE_ENC) == CORDEBUG_JIT_ENABLE_ENC);
// Can only change jit flags when module is first loaded and before there's any jitted code.
// This ensures all code in the module is jitted the same way.
hr = EnsureModuleIsInLoadCallback();
if (SUCCEEDED(hr))
{
// DD interface will check if it's a valid time to change the flags.
hr = pProcess->GetDAC()->SetCompilerFlags(GetRuntimeDomainAssembly(), fAllowJitOpts, fEnableEnC);
}
}
}
EX_CATCH_HRESULT(hr);
// emulate v2 hresults
if (GetProcess()->GetShim() != NULL)
{
// Emulate Whidbey error hresults
hr = GetProcess()->GetShim()->FilterSetJitFlagsHresult(hr);
}
return hr;
}
// Implementation of ICorDebugModule2::GetJitCompilerFlags
HRESULT CordbModule::GetJITCompilerFlags(DWORD *pdwFlags )
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pdwFlags, DWORD*);
*pdwFlags = CORDEBUG_JIT_DEFAULT;
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
HRESULT hr = S_OK;
EX_TRY
{
BOOL fAllowJitOpts;
BOOL fEnableEnC;
pProcess->GetDAC()->GetCompilerFlags (
GetRuntimeDomainAssembly(),
&fAllowJitOpts,
&fEnableEnC);
if (fEnableEnC)
{
*pdwFlags = CORDEBUG_JIT_ENABLE_ENC;
}
else if (! fAllowJitOpts)
{
*pdwFlags = CORDEBUG_JIT_DISABLE_OPTIMIZATION;
}
}
EX_CATCH_HRESULT(hr);
return hr;
}
HRESULT CordbModule::IsMappedLayout(BOOL *isMapped)
{
PUBLIC_API_ENTRY(this);
VALIDATE_POINTER_TO_OBJECT(isMapped, BOOL*);
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
*isMapped = FALSE;
CordbProcess *pProcess = GetProcess();
ATT_REQUIRE_STOPPED_MAY_FAIL(pProcess);
EX_TRY
{
hr = pProcess->GetDAC()->IsModuleMapped(m_vmModule, isMapped);
}
EX_CATCH_HRESULT(hr);
return hr;
}
/* ------------------------------------------------------------------------- *
* CordbCode class
* ------------------------------------------------------------------------- */
//-----------------------------------------------------------------------------
// CordbCode constructor
// Arguments:
// Input:
// pFunction - CordbFunction instance for this function
// encVersion - Edit and Continue version number for this code chunk
// fIsIL - indicates whether the instance is a CordbILCode (as
// opposed to a CordbNativeCode)
// id - This is the hashtable key for CordbCode objects
// - for native code, the code start address
// - for IL code, 0
// - for ReJit IL code, the remote pointer to the ReJitSharedInfo
// Output:
// fields of the CordbCode instance have been initialized
//-----------------------------------------------------------------------------
CordbCode::CordbCode(CordbFunction * pFunction, UINT_PTR id, SIZE_T encVersion, BOOL fIsIL)
: CordbBase(pFunction->GetProcess(), id, enumCordbCode),
m_fIsIL(fIsIL),
m_nVersion(encVersion),
m_rgbCode(NULL),
m_continueCounterLastSync(0),
m_pFunction(pFunction)
{
_ASSERTE(pFunction != NULL);
_ASSERTE(m_nVersion >= CorDB_DEFAULT_ENC_FUNCTION_VERSION);
} // CordbCode::CordbCode
//-----------------------------------------------------------------------------
// Destructor for CordbCode object
//-----------------------------------------------------------------------------
CordbCode::~CordbCode()
{
_ASSERTE(IsNeutered());
}
//-----------------------------------------------------------------------------
// Neutered by CordbFunction
// See CordbBase::Neuter for neuter semantics.
//-----------------------------------------------------------------------------
void CordbCode::Neuter()
{
m_pFunction = NULL;
delete [] m_rgbCode;
m_rgbCode = NULL;
CordbBase::Neuter();
}
//-----------------------------------------------------------------------------
// Public method for IUnknown::QueryInterface.
// Has standard QI semantics.
//-----------------------------------------------------------------------------
HRESULT CordbCode::QueryInterface(REFIID id, void ** pInterface)
{
if (id == IID_ICorDebugCode)
{
*pInterface = static_cast<ICorDebugCode*>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugCode *>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// NOT IMPLEMENTED. Remap sequence points are entirely private to the LS,
// and ICorDebug will dispatch a RemapOpportunity callback to notify the
// debugger instead of letting the debugger query for the points.
//
// Returns: E_NOTIMPL
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetEnCRemapSequencePoints(ULONG32 cMap, ULONG32 * pcMap, ULONG32 offsets[])
{
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32*);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(offsets, ULONG32*, cMap, true, true);
//
// Old EnC interface - deprecated
//
return E_NOTIMPL;
} // CordbCode::GetEnCRemapSequencePoints
//-----------------------------------------------------------------------------
// CordbCode::IsIL
// Public method to determine if this Code object represents IL or native code.
//
// Parameters:
// pbIL - OUT: on return, set to True if IL code, else False.
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::IsIL(BOOL *pbIL)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pbIL, BOOL *);
*pbIL = IsIL();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbCode::GetFunction
// Public method to get the Function object associated with this Code object.
// Function:Code = 1:1 for IL, and 1:n for Native. So there is always a single
// unique Function object to return.
//
// Parameters:
// ppFunction - OUT: returns the Function object for this Code.
//
// Returns:
// S_OK - on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetFunction(ICorDebugFunction **ppFunction)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppFunction, ICorDebugFunction **);
*ppFunction = static_cast<ICorDebugFunction*> (m_pFunction);
m_pFunction->ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbCode::GetSize
// Get the size of the code in bytes. If this is IL code, it will be bytes of IL.
// If this is native code, it will be bytes of native code.
//
// Parameters:
// pcBytes - OUT: on return, set to the size of the code in bytes.
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetSize(ULONG32 *pcBytes)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pcBytes, ULONG32 *);
*pcBytes = GetSize();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbCode::CreateBreakpoint
// public method to create a breakpoint in the code.
//
// Parameters:
// offset - offset in bytes to set the breakpoint at. If this is a Native
// code object (IsIl == false), then units are bytes of native code. If
// this is an IL code object, then units are bytes of IL code.
// ppBreakpoint- out-parameter to hold newly created breakpoint object.
//
// Return value:
// S_OK iff *ppBreakpoint is set. Else some error.
//-----------------------------------------------------------------------------
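// Illustrative debugger-side sketch (pCode is a hypothetical ICorDebugCode,
// e.g. from ICorDebugFunction::GetILCode): setting a breakpoint at the start
// of the method.
//
//     ICorDebugFunctionBreakpoint * pBreakpoint = NULL;
//     HRESULT hr = pCode->CreateBreakpoint(0 /* offset */, &pBreakpoint);
//     if (SUCCEEDED(hr))
//     {
//         // ... later: pBreakpoint->Activate(FALSE); pBreakpoint->Release();
//     }
//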
HRESULT CordbCode::CreateBreakpoint(ULONG32 offset,
ICorDebugFunctionBreakpoint **ppBreakpoint)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppBreakpoint, ICorDebugFunctionBreakpoint **);
HRESULT hr;
ULONG32 size = GetSize();
BOOL offsetIsIl = IsIL();
LOG((LF_CORDB, LL_INFO10000, "CCode::CreateBreakpoint, offset=%d, size=%d, IsIl=%d, this=0x%p\n",
offset, size, offsetIsIl, this));
// Make sure the offset is within range of the method.
// If we're native code, then both offset & total code size are bytes of native code,
// else they're both bytes of IL.
if (offset >= size)
{
return CORDBG_E_UNABLE_TO_SET_BREAKPOINT;
}
CordbFunctionBreakpoint *bp = new (nothrow) CordbFunctionBreakpoint(this, offset, offsetIsIl);
if (bp == NULL)
return E_OUTOFMEMORY;
hr = bp->Activate(TRUE);
if (SUCCEEDED(hr))
{
*ppBreakpoint = static_cast<ICorDebugFunctionBreakpoint*> (bp);
bp->ExternalAddRef();
return S_OK;
}
else
{
delete bp;
return hr;
}
}
//-----------------------------------------------------------------------------
// CordbCode::GetCode
// Public method to get the code-bytes for this Code object. For an IL-code
// object, this will be bytes of IL. For a native-code object, this will be
// bytes of native opcodes.
// The units of the offsets are the same as the units on the CordbCode object.
// (eg, IL offsets for an IL code object, and native offsets for a native code object)
// This will glue together hot + cold regions into a single blob.
//
// Units are also logical (aka linear) offsets, which span the hot and cold regions contiguously.
// Parameters:
// startOffset - linear offset in Code to start copying from.
// endOffset - linear offset in Code to end copying from. Total bytes copied would be (endOffset - startOffset)
// cBufferAlloc - number of bytes in the buffer supplied by the buffer[] parameter.
// buffer - caller allocated storage to copy bytes into.
// pcBufferSize - required out-parameter, holds number of bytes copied into buffer.
//
// Returns:
// S_OK if copy successful. Else error.
//-----------------------------------------------------------------------------
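// Illustrative caller-side sketch (pCode is a hypothetical ICorDebugCode):
// fetch the whole blob by sizing the buffer with GetSize first.
//
//     ULONG32 cbCode = 0;
//     IfFailRet(pCode->GetSize(&cbCode));
//     NewArrayHolder<BYTE> pbCode = new (nothrow) BYTE[cbCode];
//     if (pbCode == NULL)
//         return E_OUTOFMEMORY;
//     ULONG32 cbFetched = 0;
//     IfFailRet(pCode->GetCode(0, cbCode, cbCode, pbCode, &cbFetched));
//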
HRESULT CordbCode::GetCode(ULONG32 startOffset,
ULONG32 endOffset,
ULONG32 cBufferAlloc,
BYTE buffer[],
ULONG32 *pcBufferSize)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_ARRAY(buffer, BYTE, cBufferAlloc, true, true);
VALIDATE_POINTER_TO_OBJECT(pcBufferSize, ULONG32 *);
LOG((LF_CORDB,LL_EVERYTHING, "CC::GC: for token:0x%x\n", m_pFunction->GetMetadataToken()));
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
*pcBufferSize = 0;
// Check ranges.
ULONG32 totalSize = GetSize();
if (cBufferAlloc < endOffset - startOffset)
endOffset = startOffset + cBufferAlloc;
if (endOffset > totalSize)
endOffset = totalSize;
if (startOffset > totalSize)
startOffset = totalSize;
// Check the continue counter since WriteMemory bumps it up.
if ((m_rgbCode == NULL) ||
(m_continueCounterLastSync < GetProcess()->m_continueCounter))
{
ReadCodeBytes();
m_continueCounterLastSync = GetProcess()->m_continueCounter;
}
// if we just got the code, we'll have to copy it over
if (*pcBufferSize == 0 && m_rgbCode != NULL)
{
memcpy(buffer,
m_rgbCode+startOffset,
endOffset - startOffset);
*pcBufferSize = endOffset - startOffset;
}
return hr;
} // CordbCode::GetCode
#include "dbgipcevents.h"
//-----------------------------------------------------------------------------
// CordbCode::GetVersionNumber
// Public method to get the EnC version number of the code.
//
// Parameters:
// nVersion - OUT: on return, set to the version number.
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbCode::GetVersionNumber( ULONG32 *nVersion)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(nVersion, ULONG32 *);
LOG((LF_CORDB,LL_INFO10000,"R:CC:GVN:Returning 0x%x "
"as version\n",m_nVersion));
*nVersion = (ULONG32)m_nVersion;
#ifndef EnC_SUPPORTED
_ASSERTE(*nVersion == 1);
#endif // EnC_SUPPORTED
return S_OK;
}
// get the CordbFunction instance for this code object
CordbFunction * CordbCode::GetFunction()
{
_ASSERTE(m_pFunction != NULL);
return m_pFunction;
}
/* ------------------------------------------------------------------------- *
* CordbILCode class
* ------------------------------------------------------------------------- */
//-----------------------------------------------------------------------------
// CordbILCode ctor to make IL code.
// Arguments:
// Input:
// pFunction - pointer to the CordbFunction instance for this function
// codeRegionInfo - starting address and size in bytes of IL code blob
// nVersion - EnC version number for this IL code blob
// localVarSigToken - LocalVarSig for this IL blob
// id - the key when using ILCode in a CordbHashTable
// Output:
// fields of this instance of CordbILCode have been initialized
//-----------------------------------------------------------------------------
CordbILCode::CordbILCode(CordbFunction * pFunction,
TargetBuffer codeRegionInfo,
SIZE_T nVersion,
mdSignature localVarSigToken,
UINT_PTR id)
: CordbCode(pFunction, id, nVersion, TRUE),
#ifdef EnC_SUPPORTED
m_fIsOld(FALSE),
#endif
m_codeRegionInfo(codeRegionInfo),
m_localVarSigToken(localVarSigToken)
{
} // CordbILCode::CordbILCode
#ifdef EnC_SUPPORTED
//-----------------------------------------------------------------------------
// CordbILCode::MakeOld
// Internal method to perform any cleanup necessary when a code blob is no longer
// the most current.
//-----------------------------------------------------------------------------
void CordbILCode::MakeOld()
{
m_fIsOld = TRUE;
}
#endif
//-----------------------------------------------------------------------------
// CordbILCode::GetAddress
// Public method to get the Entry address for the code. This is the address
// where the method first starts executing.
//
// Parameters:
// pStart - out-parameter to hold start address.
//
// Returns:
// S_OK if *pStart is properly updated.
//-----------------------------------------------------------------------------
HRESULT CordbILCode::GetAddress(CORDB_ADDRESS * pStart)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pStart, CORDB_ADDRESS *);
_ASSERTE(this != NULL);
_ASSERTE(this->GetFunction() != NULL);
_ASSERTE(this->GetFunction()->GetModule() != NULL);
_ASSERTE(this->GetFunction()->GetModule()->GetProcess() == GetProcess());
*pStart = (m_codeRegionInfo.pAddress);
return S_OK;
} // CordbILCode::GetAddress
//-----------------------------------------------------------------------------
// CordbILCode::ReadCodeBytes
// Reads the actual bytes of IL code into the data member m_rgbCode
// Arguments:
// none (uses data members)
// Return value:
// standard HRESULT values
// also allocates and initializes m_rgbCode
// Notes: assumes that the caller has checked to ensure that m_rgbCode doesn't
// hold valid data
//-----------------------------------------------------------------------------
HRESULT CordbILCode::ReadCodeBytes()
{
HRESULT hr = S_OK;
EX_TRY
{
// We have an address & size, so we'll just call ReadMemory.
// This will conveniently strip out any patches too.
CORDB_ADDRESS pStart = m_codeRegionInfo.pAddress;
ULONG32 cbSize = (ULONG32) m_codeRegionInfo.cbSize;
delete [] m_rgbCode;
m_rgbCode = new BYTE[cbSize]; // throws
SIZE_T cbRead;
hr = GetProcess()->ReadMemory(pStart, cbSize, m_rgbCode, &cbRead);
IfFailThrow(hr);
SIMPLIFYING_ASSUMPTION(cbRead == cbSize);
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbILCode::ReadCodeBytes
//-----------------------------------------------------------------------------
// CordbILCode::GetILToNativeMapping
// Public method (implements ICorDebugCode) to get the IL-->{ Native Start, Native End} mapping.
// Since 1 CordbILCode can map to multiple CordbNativeCode due to generics, we cannot reliably return the
// mapping information in all cases. So we always fail with CORDBG_E_NON_NATIVE_FRAME. The caller should
// call code:CordbNativeCode::GetILToNativeMapping instead.
//
// Parameters:
// cMap - size of incoming map[] array (in elements).
// pcMap - OUT: full size of IL-->Native map (in elements).
// map - caller allocated array to be filled in.
//
// Returns:
// CORDBG_E_NON_NATIVE_FRAME in all cases
//-----------------------------------------------------------------------------
HRESULT CordbILCode::GetILToNativeMapping(ULONG32 cMap,
ULONG32 * pcMap,
COR_DEBUG_IL_TO_NATIVE_MAP map[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(map, COR_DEBUG_IL_TO_NATIVE_MAP *, cMap, true, true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
return CORDBG_E_NON_NATIVE_FRAME;
} // CordbILCode::GetILToNativeMapping
/*
* CordbILCode::GetLocalVarSig
*
* Get the method's local variable metadata signature. This may be cached, but for dynamic modules we'll always
* read it from the metadata. This function also returns the count of local variables in the method.
*
* Parameters:
* pLocalSigParser - OUT: the local variable signature for the method.
* pLocalCount - OUT: the number of locals the method has.
*
* Returns:
* HRESULT for success or failure.
*
*/
HRESULT CordbILCode::GetLocalVarSig(SigParser *pLocalSigParser,
ULONG *pLocalVarCount)
{
INTERNAL_SYNC_API_ENTRY(GetProcess());
CONTRACTL // @dbgtodo exceptions - convert to throws...
{
NOTHROW;
}
CONTRACTL_END;
FAIL_IF_NEUTERED(this);
HRESULT hr = S_OK;
// A function will not have a local var sig if it has no locals!
if (m_localVarSigToken != mdSignatureNil)
{
PCCOR_SIGNATURE localSignature = NULL;
ULONG size = 0;
EX_TRY // @dbgtodo exceptions - push this up
{
GetFunction()->GetModule()->UpdateMetaDataCacheIfNeeded(m_localVarSigToken);
hr = GetFunction()->GetModule()->GetMetaDataImporter()->GetSigFromToken(m_localVarSigToken,
&localSignature,
&size);
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
LOG((LF_CORDB, LL_WARNING, "CICF::GLVS caught hr=0x%x\n", hr));
}
IfFailRet(hr);
LOG((LF_CORDB, LL_INFO100000, "CIC::GLVS creating sig parser sig=0x%x size=0x%x\n", localSignature, size));
SigParser sigParser = SigParser(localSignature, size);
uint32_t data;
IfFailRet(sigParser.GetCallingConvInfo(&data));
_ASSERTE(data == IMAGE_CEE_CS_CALLCONV_LOCAL_SIG);
// Snag the count of locals in the sig.
uint32_t localCount;
IfFailRet(sigParser.GetData(&localCount));
LOG((LF_CORDB, LL_INFO100000, "CIC::GLVS localCount=0x%x\n", localCount));
if (pLocalSigParser != NULL)
{
*pLocalSigParser = sigParser;
}
if (pLocalVarCount != NULL)
{
*pLocalVarCount = localCount;
}
}
else
{
//
// Signature is Nil, so fill in everything with NULLs and zeros
//
if (pLocalSigParser != NULL)
{
*pLocalSigParser = SigParser(NULL, 0);
}
if (pLocalVarCount != NULL)
{
*pLocalVarCount = 0;
}
}
LOG((LF_CORDB, LL_INFO100000, "CIC::GLVS returning hr=0x%x\n", hr));
return hr;
}
//-----------------------------------------------------------------------------
// CordbILCode::GetLocalVariableType
// Internal method. Return the type of an IL local, specified by 0-based index.
//
// Parameters:
// dwIndex - 0-based index for IL local number.
// pInst - instantiation information if this is a generic function. Eg,
// if the function is List<T>, pInst describes T.
// ppResultType - out parameter, yields the CordbType of the local.
//
// Return:
// S_OK on success.
//
HRESULT CordbILCode::GetLocalVariableType(DWORD dwIndex,
const Instantiation * pInst,
CordbType ** ppResultType)
{
ATT_ALLOW_LIVE_DO_STOPGO(GetProcess());
LOG((LF_CORDB, LL_INFO10000, "CIC::GLVT dwIndex=0x%x pInst=0x%p\n", dwIndex, pInst));
HRESULT hr = S_OK;
EX_TRY
{
// Get the local variable signature.
SigParser sigParser;
ULONG cLocals;
IfFailThrow(GetLocalVarSig(&sigParser, &cLocals));
// Check the index.
if (dwIndex >= cLocals)
{
ThrowHR(E_INVALIDARG);
}
// Run the signature and find the required argument.
for (unsigned int i = 0; i < dwIndex; i++)
{
LOG((LF_CORDB, LL_INFO10000, "CIC::GLVT scanning index 0x%x\n", dwIndex));
IfFailThrow(sigParser.SkipExactlyOne());
}
hr = CordbType::SigToType(GetFunction()->GetModule(), &sigParser, pInst, ppResultType);
LOG((LF_CORDB, LL_INFO10000, "CIC::GLVT CT::SigToType returned hr=0x%x\n", hr));
IfFailThrow(hr);
} EX_CATCH_HRESULT(hr);
return hr;
}
mdSignature CordbILCode::GetLocalVarSigToken()
{
return m_localVarSigToken;
}
HRESULT CordbILCode::CreateNativeBreakpoint(ICorDebugFunctionBreakpoint **ppBreakpoint)
{
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppBreakpoint, ICorDebugFunctionBreakpoint **);
HRESULT hr;
ULONG32 size = GetSize();
LOG((LF_CORDB, LL_INFO10000, "CordbILCode::CreateNativeBreakpoint, size=%d, this=0x%p\n",
size, this));
ULONG32 offset = 0;
CordbFunctionBreakpoint *bp = new (nothrow) CordbFunctionBreakpoint(this, offset, FALSE);
if (bp == NULL)
{
return E_OUTOFMEMORY;
}
hr = bp->Activate(TRUE);
if (SUCCEEDED(hr))
{
*ppBreakpoint = static_cast<ICorDebugFunctionBreakpoint*> (bp);
bp->ExternalAddRef();
return S_OK;
}
else
{
delete bp;
return hr;
}
}
CordbReJitILCode::CordbReJitILCode(CordbFunction *pFunction, SIZE_T encVersion, VMPTR_ILCodeVersionNode vmILCodeVersionNode) :
CordbILCode(pFunction, TargetBuffer(), encVersion, mdSignatureNil, VmPtrToCookie(vmILCodeVersionNode)),
m_cClauses(0),
m_cbLocalIL(0),
m_cILMap(0)
{
_ASSERTE(!vmILCodeVersionNode.IsNull());
DacSharedReJitInfo data = { 0 };
IfFailThrow(GetProcess()->GetDAC()->GetILCodeVersionNodeData(vmILCodeVersionNode, &data));
IfFailThrow(Init(&data));
}
//-----------------------------------------------------------------------------
// CordbReJitILCode::Init
//
// Returns:
// S_OK if all fields are inited. Else error.
HRESULT CordbReJitILCode::Init(DacSharedReJitInfo* pSharedReJitInfo)
{
HRESULT hr = S_OK;
// Instrumented IL map
if (pSharedReJitInfo->m_cInstrumentedMapEntries)
{
if (pSharedReJitInfo->m_cInstrumentedMapEntries > 100000)
return CORDBG_E_TARGET_INCONSISTENT;
m_cILMap = pSharedReJitInfo->m_cInstrumentedMapEntries;
m_pILMap = new (nothrow)COR_IL_MAP[m_cILMap];
if (m_pILMap == NULL)
    return E_OUTOFMEMORY;
TargetBuffer mapBuffer(pSharedReJitInfo->m_rgInstrumentedMapEntries, m_cILMap*sizeof(COR_IL_MAP));
IfFailRet(GetProcess()->SafeReadBuffer(mapBuffer, (BYTE*)m_pILMap.GetValue(), FALSE /* bThrowOnError */));
}
// Read the method's IL header
CORDB_ADDRESS pIlHeader = pSharedReJitInfo->m_pbIL;
IMAGE_COR_ILMETHOD_FAT header = { 0 };
bool headerMustBeTiny = false;
ULONG32 headerSize = 0;
hr = GetProcess()->SafeReadStruct(pIlHeader, &header);
if (hr != S_OK)
{
// It's possible the header is tiny and there isn't enough memory to read a complete
// FAT header
headerMustBeTiny = true;
IfFailRet(GetProcess()->SafeReadStruct(pIlHeader, (IMAGE_COR_ILMETHOD_TINY *)&header));
}
// Read the ILCodeSize and LocalVarSigTok from header
ULONG32 ilCodeSize = 0;
IMAGE_COR_ILMETHOD_TINY *pMethodTinyHeader = (IMAGE_COR_ILMETHOD_TINY *)&header;
bool isTinyHeader = ((pMethodTinyHeader->Flags_CodeSize & (CorILMethod_FormatMask >> 1)) == CorILMethod_TinyFormat);
if (isTinyHeader)
{
ilCodeSize = (((unsigned)pMethodTinyHeader->Flags_CodeSize) >> (CorILMethod_FormatShift - 1));
headerSize = sizeof(IMAGE_COR_ILMETHOD_TINY);
m_localVarSigToken = mdSignatureNil;
}
else if (headerMustBeTiny)
{
// header was not CorILMethod_TinyFormat
// this is not possible, must be an error when reading from data target
return CORDBG_E_READVIRTUAL_FAILURE;
}
else
{
ilCodeSize = header.CodeSize;
headerSize = header.Size * 4;
m_localVarSigToken = header.LocalVarSigTok;
}
if (ilCodeSize == 0 || ilCodeSize > 100000)
{
return CORDBG_E_TARGET_INCONSISTENT;
}
m_codeRegionInfo.Init(pIlHeader + headerSize, ilCodeSize);
m_pLocalIL = new (nothrow) BYTE[ilCodeSize];
if (m_pLocalIL == NULL)
return E_OUTOFMEMORY;
m_cbLocalIL = ilCodeSize;
IfFailRet(GetProcess()->SafeReadBuffer(m_codeRegionInfo, m_pLocalIL, FALSE /*throwOnError*/));
// Check if this il code has exception clauses
if ((pMethodTinyHeader->Flags_CodeSize & CorILMethod_MoreSects) == 0)
{
return S_OK; // no EH, done initing
}
// EH section starts at the 4 byte aligned address after the code
CORDB_ADDRESS ehClauseHeader = ((pIlHeader + headerSize + ilCodeSize - 1) & ~3) + 4;
BYTE kind = 0;
IfFailRet(GetProcess()->SafeReadStruct(ehClauseHeader, &kind));
if ((kind & CorILMethod_Sect_KindMask) != CorILMethod_Sect_EHTable)
{
return S_OK;
}
if (kind & CorILMethod_Sect_FatFormat)
{
// Read the section header to see how many clauses there are
IMAGE_COR_ILMETHOD_SECT_FAT sectionHeader = { 0 };
IfFailRet(GetProcess()->SafeReadStruct(ehClauseHeader, &sectionHeader));
m_cClauses = (sectionHeader.DataSize - 4) / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT);
if (m_cClauses > 10000) // sanity check the data before allocating
{
return CORDBG_E_TARGET_INCONSISTENT;
}
// Read in the clauses
TargetBuffer buffer(ehClauseHeader + sizeof(IMAGE_COR_ILMETHOD_SECT_FAT), m_cClauses*sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT));
NewArrayHolder<IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT> pClauses = new (nothrow)IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT[m_cClauses];
if (pClauses == NULL)
return E_OUTOFMEMORY;
IfFailRet(GetProcess()->SafeReadBuffer(buffer, (BYTE*)pClauses.GetValue(), FALSE /*throwOnError*/));
// convert clauses
m_pClauses = new (nothrow)CorDebugEHClause[m_cClauses];
if (m_pClauses == NULL)
return E_OUTOFMEMORY;
for (ULONG32 i = 0; i < m_cClauses; i++)
{
BOOL isFilter = ((pClauses[i].Flags & COR_ILEXCEPTION_CLAUSE_FILTER) != 0);
m_pClauses[i].Flags = pClauses[i].Flags;
m_pClauses[i].TryOffset = pClauses[i].TryOffset;
m_pClauses[i].TryLength = pClauses[i].TryLength;
m_pClauses[i].HandlerOffset = pClauses[i].HandlerOffset;
m_pClauses[i].HandlerLength = pClauses[i].HandlerLength;
// these two fields are a union in the image, but are separate in the struct ICorDebug returns
m_pClauses[i].ClassToken = isFilter ? 0 : pClauses[i].ClassToken;
m_pClauses[i].FilterOffset = isFilter ? pClauses[i].FilterOffset : 0;
}
}
else
{
// Read in the section header to see how many small clauses there are
IMAGE_COR_ILMETHOD_SECT_SMALL sectionHeader = { 0 };
IfFailRet(GetProcess()->SafeReadStruct(ehClauseHeader, &sectionHeader));
m_cClauses = (sectionHeader.DataSize - 4) / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL);
if (m_cClauses > 10000) // sanity check the data before allocating
{
return CORDBG_E_TARGET_INCONSISTENT;
}
// Read in the clauses
TargetBuffer buffer(ehClauseHeader + sizeof(IMAGE_COR_ILMETHOD_SECT_SMALL), m_cClauses*sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL));
NewArrayHolder<IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL> pClauses = new (nothrow)IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL[m_cClauses];
if (pClauses == NULL)
return E_OUTOFMEMORY;
IfFailRet(GetProcess()->SafeReadBuffer(buffer, (BYTE*)pClauses.GetValue(), FALSE /*throwOnError*/));
// convert clauses
m_pClauses = new (nothrow)CorDebugEHClause[m_cClauses];
if (m_pClauses == NULL)
return E_OUTOFMEMORY;
for (ULONG32 i = 0; i < m_cClauses; i++)
{
BOOL isFilter = ((pClauses[i].Flags & COR_ILEXCEPTION_CLAUSE_FILTER) != 0);
m_pClauses[i].Flags = pClauses[i].Flags;
m_pClauses[i].TryOffset = pClauses[i].TryOffset;
m_pClauses[i].TryLength = pClauses[i].TryLength;
m_pClauses[i].HandlerOffset = pClauses[i].HandlerOffset;
m_pClauses[i].HandlerLength = pClauses[i].HandlerLength;
// these two fields are a union in the image, but are separate in the struct ICorDebug returns
m_pClauses[i].ClassToken = isFilter ? 0 : pClauses[i].ClassToken;
m_pClauses[i].FilterOffset = isFilter ? pClauses[i].FilterOffset : 0;
}
}
return S_OK;
}
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
//-----------------------------------------------------------------------------
// CordbReJitILCode::GetEHClauses
// Public method to get the EH clauses for IL code
//
// Parameters:
// cClauses - size of incoming clauses array (in elements).
// pcClauses - OUT param: cClauses>0 -> the number of elements written to in the clauses array.
// cClauses=0 -> the number of EH clauses this IL code has
// clauses - caller allocated storage to hold the EH clauses.
//
// Returns:
// S_OK if successfully copied elements to clauses array.
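// Illustrative caller-side sketch of the usual two-call pattern (pILCode is a
// hypothetical ICorDebugILCode): query the count first, then fetch the clauses.
//
//     ULONG32 cClauses = 0;
//     IfFailRet(pILCode->GetEHClauses(0, &cClauses, NULL));
//     NewArrayHolder<CorDebugEHClause> pClauses = new (nothrow) CorDebugEHClause[cClauses];
//     if (pClauses == NULL)
//         return E_OUTOFMEMORY;
//     ULONG32 cFetched = 0;
//     IfFailRet(pILCode->GetEHClauses(cClauses, &cFetched, pClauses));
//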
HRESULT CordbReJitILCode::GetEHClauses(ULONG32 cClauses, ULONG32 * pcClauses, CorDebugEHClause clauses[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcClauses, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(clauses, CorDebugEHClause *, cClauses, true, true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
if (cClauses != 0 && clauses == NULL)
{
return E_INVALIDARG;
}
if (pcClauses != NULL)
{
if (cClauses == 0)
{
*pcClauses = m_cClauses;
}
else
{
*pcClauses = MIN(cClauses, m_cClauses);
}
}
if (clauses != NULL)
{
memcpy_s(clauses, sizeof(CorDebugEHClause)*cClauses, m_pClauses, sizeof(CorDebugEHClause)*MIN(cClauses, m_cClauses));
}
return S_OK;
}
ULONG CordbReJitILCode::AddRef()
{
return CordbCode::AddRef();
}
ULONG CordbReJitILCode::Release()
{
return CordbCode::Release();
}
HRESULT CordbReJitILCode::QueryInterface(REFIID riid, void** ppInterface)
{
if (riid == IID_ICorDebugILCode)
{
*ppInterface = static_cast<ICorDebugILCode*>(this);
}
else if (riid == IID_ICorDebugILCode2)
{
*ppInterface = static_cast<ICorDebugILCode2*>(this);
}
else
{
return CordbILCode::QueryInterface(riid, ppInterface);
}
AddRef();
return S_OK;
}
HRESULT CordbReJitILCode::GetLocalVarSigToken(mdSignature *pmdSig)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pmdSig, mdSignature *);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
*pmdSig = m_localVarSigToken;
return S_OK;
}
HRESULT CordbReJitILCode::GetInstrumentedILMap(ULONG32 cMap, ULONG32 *pcMap, COR_IL_MAP map[])
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(map, COR_IL_MAP *, cMap, true, true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
if (cMap != 0 && map == NULL)
{
return E_INVALIDARG;
}
if (pcMap != NULL)
{
if (cMap == 0)
{
*pcMap = m_cILMap;
}
else
{
*pcMap = MIN(cMap, m_cILMap);
}
}
if (map != NULL)
{
memcpy_s(map, sizeof(COR_IL_MAP)*cMap, m_pILMap, sizeof(COR_IL_MAP)*MIN(cMap, m_cILMap));
}
return S_OK;
}
// FindNativeInfoInILVariableArray
// Linear search through an array of NativeVarInfos, to find the variable of index dwIndex, valid
// at the given ip. Returns CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't valid at the given ip.
// Arguments:
// input: dwIndex - variable number
// ip - IP
// nativeInfoList - list of instances of NativeVarInfo
// output: ppNativeInfo - the element of nativeInfoList that corresponds to the IP and variable number
// if we find such an element or NULL otherwise
// Return value: HRESULT: returns S_OK or CORDBG_E_IL_VAR_NOT_AVAILABLE if the variable isn't found
//
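// Illustrative caller-side sketch (dwVarNumber, ipOffset and pNativeVarList are
// hypothetical locals): callers treat CORDBG_E_IL_VAR_NOT_AVAILABLE as "no
// value at this ip", not as a hard failure.
//
//     const ICorDebugInfo::NativeVarInfo * pNativeInfo = NULL;
//     HRESULT hr = FindNativeInfoInILVariableArray(dwVarNumber, ipOffset, pNativeVarList, &pNativeInfo);
//     if (hr == CORDBG_E_IL_VAR_NOT_AVAILABLE)
//     {
//         // The variable has no home at this native offset.
//     }
//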
HRESULT FindNativeInfoInILVariableArray(DWORD dwIndex,
SIZE_T ip,
const DacDbiArrayList<ICorDebugInfo::NativeVarInfo> * nativeInfoList,
const ICorDebugInfo::NativeVarInfo ** ppNativeInfo)
{
_ASSERTE(ppNativeInfo != NULL);
*ppNativeInfo = NULL;
// A few words about this search: it must be linear, and the
// comparison of startOffset and endOffset to ip must be
// <=/>. startOffset points to the first instruction that will
// make the variable's home valid. endOffset points to the first
// instruction at which the variable's home is invalid.
int lastGoodOne = -1;
for (unsigned int i = 0; i < (unsigned)nativeInfoList->Count(); i++)
{
if ((*nativeInfoList)[i].varNumber == dwIndex)
{
if ( (lastGoodOne == -1) ||
((*nativeInfoList)[lastGoodOne].startOffset < (*nativeInfoList)[i].startOffset) )
{
lastGoodOne = i;
}
if (((*nativeInfoList)[i].startOffset <= ip) &&
((*nativeInfoList)[i].endOffset > ip))
{
*ppNativeInfo = &((*nativeInfoList)[i]);
return S_OK;
}
}
}
// workaround:
//
// We didn't find the variable. Was the endOffset of the last range for this variable
// equal to the current IP? If so, go ahead and "lie" and report that as the
// variable's home for now.
//
// Rationale:
//
// * See TODO comment in code:Compiler::siUpdate (jit\scopeinfo.cpp). In optimized
// code, the JIT can report var lifetimes as being one instruction too short.
// This workaround makes up for that. Example code:
//
// static void foo(int x)
// {
// int b = x; // Value of "x" would not be reported in optimized code without the workaround
// bar(ref b);
// }
//
// * Since this is the first instruction after the last range a variable was alive,
// we're essentially assuming that since that instruction hasn't been executed
// yet, and since there isn't a new home for the variable, that the last home is
// still good. This actually turns out to be true 99.9% of the time, so we'll go
// with it for now.
// * We've been lying like this since 1999, so surely it's safe.
if ((lastGoodOne > -1) && ((*nativeInfoList)[lastGoodOne].endOffset == ip))
{
*ppNativeInfo = &((*nativeInfoList)[lastGoodOne]);
return S_OK;
}
return CORDBG_E_IL_VAR_NOT_AVAILABLE;
} // FindNativeInfoInILVariableArray
// * ------------------------------------------------------------------------- *
// * Variable Enum class
// * ------------------------------------------------------------------------- *
//-----------------------------------------------------------------------------
// CordbVariableHome constructor
// Arguments:
// Input:
// pCode - CordbNativeCode instance containing this variable home
// pNativeVarInfo - native location, lifetime, and index information for
// this variable
// isLocal - indicates whether the instance is a local variable,
// as opposed to an argument
// index - the argument or slot index
// Output:
// fields of the CordbVariableHome instance have been initialized
//-----------------------------------------------------------------------------
CordbVariableHome::CordbVariableHome(CordbNativeCode *pCode,
const ICorDebugInfo::NativeVarInfo nativeVarInfo,
BOOL isLocal,
ULONG index) :
CordbBase(pCode->GetModule()->GetProcess(), 0)
{
_ASSERTE(pCode != NULL);
m_pCode.Assign(pCode);
m_nativeVarInfo = nativeVarInfo;
m_isLocal = isLocal;
m_index = index;
}
CordbVariableHome::~CordbVariableHome()
{
_ASSERTE(this->IsNeutered());
}
void CordbVariableHome::Neuter()
{
m_pCode.Clear();
CordbBase::Neuter();
}
//-----------------------------------------------------------------------------
// Public method for IUnknown::QueryInterface.
// Has standard QI semantics.
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::QueryInterface(REFIID id, void **pInterface)
{
if (id == IID_ICorDebugVariableHome)
{
*pInterface = static_cast<ICorDebugVariableHome *>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugVariableHome *>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetCode
// Public method to get the Code object containing this variable home.
//
// Parameters:
// ppCode - OUT: returns the Code object for this variable home.
//
// Returns:
// S_OK - on success.
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetCode(ICorDebugCode **ppCode)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppCode, ICorDebugCode **);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
HRESULT hr = m_pCode->QueryInterface(IID_ICorDebugCode, (LPVOID*)ppCode);
return hr;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetSlotIndex
// Public method to get the slot index for this variable home.
//
// Parameters:
// pSlotIndex - OUT: returns the managed slot-index of this variable home.
//
// Returns:
// S_OK - on success
// E_FAIL - if the variable is not a local variable, but an argument
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetSlotIndex(ULONG32 *pSlotIndex)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pSlotIndex, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
if (!m_isLocal)
{
return E_FAIL;
}
*pSlotIndex = m_index;
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetArgumentIndex
// Public method to get the argument index for this variable home.
//
// Parameters:
// pArgumentIndex - OUT: returns the managed argument-index of this variable home.
//
// Returns:
// S_OK - on success
// E_FAIL - if the variable is not an argument, but a local variable
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetArgumentIndex(ULONG32 *pArgumentIndex)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pArgumentIndex, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
if (m_isLocal)
{
return E_FAIL;
}
*pArgumentIndex = m_index;
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetLiveRange
// Public method to get the native range over which this variable is live.
//
// Parameters:
// pStartOffset - OUT: returns the logical offset at which the variable is
// first live
// pEndOffset - OUT: returns the logical offset immediately after that at
// which the variable is last live
//
// Returns:
// S_OK - on success
//-----------------------------------------------------------------------------
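// Illustrative caller-side sketch (pVarHome is a hypothetical
// ICorDebugVariableHome):
//
//     ULONG32 startOffset = 0, endOffset = 0;
//     IfFailRet(pVarHome->GetLiveRange(&startOffset, &endOffset));
//     // The home is valid for native offsets in [startOffset, endOffset).
//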
HRESULT CordbVariableHome::GetLiveRange(ULONG32 *pStartOffset,
ULONG32 *pEndOffset)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pStartOffset, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT(pEndOffset, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
*pStartOffset = m_nativeVarInfo.startOffset;
*pEndOffset = m_nativeVarInfo.endOffset;
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetLocationType
// Public method to get the type of native location for this variable home.
//
// Parameters:
// pLocationType - OUT: the type of native location
//
// Returns:
// S_OK - on success
//-----------------------------------------------------------------------------
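// Illustrative caller-side sketch (pVarHome is a hypothetical
// ICorDebugVariableHome, e.g. from ICorDebugCode4::EnumerateVariableHomes):
// decode the location type before reading the register/offset.
//
//     VariableLocationType locType = VLT_INVALID;
//     IfFailRet(pVarHome->GetLocationType(&locType));
//     CorDebugRegister regVal;
//     LONG cbOffset = 0;
//     if (locType == VLT_REGISTER)
//     {
//         IfFailRet(pVarHome->GetRegister(&regVal));  // the value lives in regVal
//     }
//     else if (locType == VLT_REGISTER_RELATIVE)
//     {
//         IfFailRet(pVarHome->GetRegister(&regVal));  // base register
//         IfFailRet(pVarHome->GetOffset(&cbOffset));  // plus this offset
//     }
//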
HRESULT CordbVariableHome::GetLocationType(VariableLocationType *pLocationType)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pLocationType, VariableLocationType *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
switch (m_nativeVarInfo.loc.vlType)
{
case ICorDebugInfo::VLT_REG:
*pLocationType = VLT_REGISTER;
break;
case ICorDebugInfo::VLT_STK:
*pLocationType = VLT_REGISTER_RELATIVE;
break;
default:
*pLocationType = VLT_INVALID;
}
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetRegister
// Public method to get the register or base register for this variable home.
//
// Parameters:
// pRegister - OUT: for VLT_REGISTER location types, gives the register.
// for VLT_REGISTER_RELATIVE location types, gives the base
// register.
//
// Returns:
// S_OK - on success
// E_FAIL - for VLT_INVALID location types
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetRegister(CorDebugRegister *pRegister)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pRegister, CorDebugRegister *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
switch (m_nativeVarInfo.loc.vlType)
{
case ICorDebugInfo::VLT_REG:
*pRegister = ConvertRegNumToCorDebugRegister(m_nativeVarInfo.loc.vlReg.vlrReg);
break;
case ICorDebugInfo::VLT_STK:
*pRegister = ConvertRegNumToCorDebugRegister(m_nativeVarInfo.loc.vlStk.vlsBaseReg);
break;
default:
return E_FAIL;
}
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbVariableHome::GetOffset
// Public method to get the offset from the base register for this variable home.
//
// Parameters:
// pOffset - OUT: gives the offset from the base register
//
// Returns:
// S_OK - on success
// E_FAIL - for location types other than VLT_REGISTER_RELATIVE
//-----------------------------------------------------------------------------
HRESULT CordbVariableHome::GetOffset(LONG *pOffset)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pOffset, LONG *);
ATT_REQUIRE_STOPPED_MAY_FAIL(m_pCode->GetProcess());
switch (m_nativeVarInfo.loc.vlType)
{
case ICorDebugInfo::VLT_STK:
*pOffset = m_nativeVarInfo.loc.vlStk.vlsOffset;
break;
default:
return E_FAIL;
}
return S_OK;
}
// * ------------------------------------------------------------------------- *
// * Native Code class
// * ------------------------------------------------------------------------- *
//-----------------------------------------------------------------------------
// CordbNativeCode ctor to make Native code.
// Arguments:
// Input:
// pFunction - the function for which this is the native code object
// pJitData - the information about this code object retrieved from the DAC
// fIsInstantiatedGeneric - indicates whether this code object is an instantiated
// generic
// Output:
// fields of this instance of CordbNativeCode have been initialized
//-----------------------------------------------------------------------------
CordbNativeCode::CordbNativeCode(CordbFunction * pFunction,
const NativeCodeFunctionData * pJitData,
BOOL fIsInstantiatedGeneric)
: CordbCode(pFunction, (UINT_PTR)pJitData->m_rgCodeRegions[kHot].pAddress, pJitData->encVersion, FALSE),
m_vmNativeCodeMethodDescToken(pJitData->vmNativeCodeMethodDescToken),
m_fCodeAvailable(TRUE),
m_fIsInstantiatedGeneric(fIsInstantiatedGeneric != FALSE)
{
_ASSERTE(GetVersion() >= CorDB_DEFAULT_ENC_FUNCTION_VERSION);
for (CodeBlobRegion region = kHot; region < MAX_REGIONS; ++region)
{
m_rgCodeRegions[region] = pJitData->m_rgCodeRegions[region];
}
} //CordbNativeCode::CordbNativeCode
//-----------------------------------------------------------------------------
// Public method for IUnknown::QueryInterface.
// Has standard QI semantics.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::QueryInterface(REFIID id, void ** pInterface)
{
if (id == IID_ICorDebugCode)
{
*pInterface = static_cast<ICorDebugCode *>(this);
}
else if (id == IID_ICorDebugCode2)
{
*pInterface = static_cast<ICorDebugCode2 *>(this);
}
else if (id == IID_ICorDebugCode3)
{
*pInterface = static_cast<ICorDebugCode3 *>(this);
}
else if (id == IID_ICorDebugCode4)
{
*pInterface = static_cast<ICorDebugCode4 *>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorDebugCode *>(this));
}
else
{
*pInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// CordbNativeCode::GetAddress
// Public method to get the Entry address for the code. This is the address
// where the method first starts executing.
//
// Parameters:
// pStart - out-parameter to hold start address.
//
// Returns:
// S_OK if *pStart is properly updated.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetAddress(CORDB_ADDRESS * pStart)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pStart, CORDB_ADDRESS *);
_ASSERTE(this != NULL);
_ASSERTE(this->GetFunction() != NULL);
_ASSERTE(this->GetFunction()->GetModule() != NULL);
_ASSERTE(this->GetFunction()->GetModule()->GetProcess() == GetProcess());
// Since we don't do code-pitching, the address points directly to the code.
*pStart = (m_rgCodeRegions[kHot].pAddress);
if (*pStart == NULL)
{
return CORDBG_E_CODE_NOT_AVAILABLE;
}
return S_OK;
} // CordbNativeCode::GetAddress
//-----------------------------------------------------------------------------
// CordbNativeCode::ReadCodeBytes
// Reads the actual bytes of native code from both the hot and cold regions
// into the data member m_rgbCode
// Arguments:
// none (uses data members)
// Return value:
// standard HRESULT values
// also allocates and initializes m_rgbCode
// Notes: assumes that the caller has checked to ensure that m_rgbCode doesn't
// hold valid data
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::ReadCodeBytes()
{
HRESULT hr = S_OK;
EX_TRY
{
// We have an address & size, so we'll just call ReadMemory.
// This will conveniently strip out any patches too.
CORDB_ADDRESS pHotStart = m_rgCodeRegions[kHot].pAddress;
CORDB_ADDRESS pColdStart = m_rgCodeRegions[kCold].pAddress;
ULONG32 cbHotSize = (ULONG32) m_rgCodeRegions[kHot].cbSize;
ULONG32 cbColdSize = GetColdSize();
delete [] m_rgbCode;
m_rgbCode = new BYTE[cbHotSize + cbColdSize];
SIZE_T cbRead;
hr = GetProcess()->ReadMemory(pHotStart, cbHotSize, m_rgbCode, &cbRead);
IfFailThrow(hr);
SIMPLIFYING_ASSUMPTION(cbRead == cbHotSize);
if (HasColdRegion())
{
hr = GetProcess()->ReadMemory(pColdStart, cbColdSize, (BYTE *) m_rgbCode + cbHotSize, &cbRead);
IfFailThrow(hr);
SIMPLIFYING_ASSUMPTION(cbRead == cbColdSize);
}
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbNativeCode::ReadCodeBytes
//-----------------------------------------------------------------------------
// CordbNativeCode::GetColdSize
// Get the size of the cold regions in bytes.
//
// Parameters:
// none--uses data member m_rgCodeRegions to compute total size.
//
// Returns:
// the size of the cold code in bytes.
//-----------------------------------------------------------------------------
ULONG32 CordbNativeCode::GetColdSize()
{
ULONG32 pcBytes = 0;
for (CodeBlobRegion index = kCold; index < MAX_REGIONS; ++index)
{
pcBytes += m_rgCodeRegions[index].cbSize;
}
return pcBytes;
} // CordbNativeCode::GetColdSize
//-----------------------------------------------------------------------------
// CordbNativeCode::GetSize
// Get the size of the code in bytes.
//
// Parameters:
// none--uses data member m_rgCodeRegions to compute total size.
//
// Returns:
// the size of the code in bytes.
//-----------------------------------------------------------------------------
ULONG32 CordbNativeCode::GetSize()
{
ULONG32 pcBytes = 0;
for (CodeBlobRegion index = kHot; index < MAX_REGIONS; ++index)
{
pcBytes += m_rgCodeRegions[index].cbSize;
}
return pcBytes;
} // CordbNativeCode::GetSize
//-----------------------------------------------------------------------------
// CordbNativeCode::GetILToNativeMapping
// Public method (implements ICorDebugCode) to get the IL-->{ Native Start, Native End} mapping.
// This can only be retrieved for native code.
// This will copy as much of the map as can fit in the incoming buffer.
//
// Parameters:
// cMap - size of incoming map[] array (in elements).
// pcMap - OUT: full size of IL-->Native map (in elements).
// map - caller allocated array to be filled in.
//
// Returns:
// S_OK on successful copying.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetILToNativeMapping(ULONG32 cMap,
ULONG32 * pcMap,
COR_DEBUG_IL_TO_NATIVE_MAP map[])
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT_OR_NULL(pcMap, ULONG32 *);
VALIDATE_POINTER_TO_OBJECT_ARRAY_OR_NULL(map, COR_DEBUG_IL_TO_NATIVE_MAP *,cMap,true,true);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
EX_TRY
{
LoadNativeInfo();
SequencePoints * pSeqPts = GetSequencePoints();
DebuggerILToNativeMap * rgMapInt = pSeqPts->GetMapAddr();
ULONG32 cMapIntCount = pSeqPts->GetEntryCount();
// If they gave us space to copy into...
if (map != NULL)
{
// Only copy as much as either they gave us or we have to copy.
ULONG32 cMapToCopy = min(cMap, cMapIntCount);
// Remember that we need to translate between our internal DebuggerILToNativeMap and the external
// COR_DEBUG_IL_TO_NATIVE_MAP!
ULONG32 size = GetSize();
ExportILToNativeMap(cMapToCopy, map, rgMapInt, size);
}
// return the full count of map entries
if (pcMap)
{
*pcMap = cMapIntCount;
}
}
EX_CATCH_HRESULT(hr);
return hr;
} // CordbNativeCode::GetILToNativeMapping
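//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): GetILToNativeMapping follows the usual
// count-then-fill pattern - query the required element count with a null
// buffer, then allocate and fetch. "pCode" is a hypothetical ICorDebugCode
// pointer held by the debugger; error-path cleanup is omitted for brevity.
//
// ULONG32 cEntries = 0;
// IfFailRet(pCode->GetILToNativeMapping(0, &cEntries, NULL));
// COR_DEBUG_IL_TO_NATIVE_MAP * rgMap = new (nothrow) COR_DEBUG_IL_TO_NATIVE_MAP[cEntries];
// if (rgMap == NULL)
// return E_OUTOFMEMORY;
// IfFailRet(pCode->GetILToNativeMapping(cEntries, &cEntries, rgMap));
// // ... consume rgMap[0..cEntries-1], then delete [] rgMap;
//-----------------------------------------------------------------------------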
//-----------------------------------------------------------------------------
// CordbNativeCode::GetCodeChunks
// Public method to get the code regions of code. If the code
// is broken into discontinuous regions (hot + cold), this lets a debugger
// find the number of regions, and (start,size) of each.
//
// Parameters:
// cbufSize - size of incoming chunks array (in elements).
// pcnumChunks - OUT param: the number of elements written to the chunks array.
// chunks - caller allocated storage to hold the code chunks.
//
// Returns:
// S_OK if successfully copied elements to Chunk array.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetCodeChunks(
ULONG32 cbufSize,
ULONG32 * pcnumChunks,
CodeChunkInfo chunks[]
)
{
PUBLIC_API_ENTRY(this);
if (pcnumChunks == NULL)
{
return E_INVALIDARG;
}
if ((chunks == NULL) != (cbufSize == 0))
{
return E_INVALIDARG;
}
// Current V2.0 implementation has at most 2 possible chunks right now (1 hot, and 1 cold).
ULONG32 cActualChunks = HasColdRegion() ? 2 : 1;
// If no buf size, then we're querying the total number of chunks.
if (cbufSize == 0)
{
*pcnumChunks = cActualChunks;
return S_OK;
}
// Else give them as many as they asked for.
for (CodeBlobRegion index = kHot; (index < MAX_REGIONS) && ((int)cbufSize > index); ++index)
{
// Fill in the region information
chunks[index].startAddr = m_rgCodeRegions[index].pAddress;
chunks[index].length = (ULONG32) (m_rgCodeRegions[index].cbSize);
*pcnumChunks = cbufSize;
}
return S_OK;
} // CordbNativeCode::GetCodeChunks
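//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): passing cbufSize == 0 queries the number
// of regions (at most hot + cold in this implementation); a second call fills
// the caller-allocated array. "pCode2" is a hypothetical ICorDebugCode2
// pointer.
//
// ULONG32 cChunks = 0;
// IfFailRet(pCode2->GetCodeChunks(0, &cChunks, NULL));
// CodeChunkInfo chunks[2]; // hot + cold at most today
// IfFailRet(pCode2->GetCodeChunks(cChunks, &cChunks, chunks));
//-----------------------------------------------------------------------------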
//-----------------------------------------------------------------------------
// CordbNativeCode::GetCompilerFlags
// Public entry point to get code flags for this Code object.
// Originally, ICDCode had this method implemented independently from the
// ICDModule method GetJitCompilerFlags. This was because it was considered that
// the flags would be per function, rather than per module.
// In addition, GetCompilerFlags did two different things depending on whether
// the code had a native image. It turned out that was the wrong thing to do.
//
// Parameters:
// pdwFlags - OUT: code gen flags (see CorDebugJITCompilerFlags)
//
// Return value:
// S_OK if pdwFlags is set properly.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::GetCompilerFlags(DWORD * pdwFlags)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pdwFlags, DWORD *);
*pdwFlags = 0;
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
return GetFunction()->GetModule()->GetJITCompilerFlags(pdwFlags);
} // CordbNativeCode::GetCompilerFlags
//-----------------------------------------------------------------------------
// Given an IL local variable number and a native IP offset, return the
// location of the variable in jitted code.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::ILVariableToNative(DWORD dwIndex,
SIZE_T ip,
const ICorDebugInfo::NativeVarInfo ** ppNativeInfo)
{
_ASSERTE(m_nativeVarData.IsInitialized());
return FindNativeInfoInILVariableArray(dwIndex,
ip,
m_nativeVarData.GetOffsetInfoList(),
ppNativeInfo);
} // CordbNativeCode::ILVariableToNative
HRESULT CordbNativeCode::GetReturnValueLiveOffset(ULONG32 ILoffset, ULONG32 bufferSize, ULONG32 *pFetched, ULONG32 *pOffsets)
{
HRESULT hr = S_OK;
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(pFetched, ULONG32 *);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
EX_TRY
{
hr = GetReturnValueLiveOffsetImpl(NULL, ILoffset, bufferSize, pFetched, pOffsets);
}
EX_CATCH_HRESULT(hr);
return hr;
}
//-----------------------------------------------------------------------------
// CordbNativeCode::EnumerateVariableHomes
// Public method to get an enumeration of native variable homes. This may
// include multiple ICorDebugVariableHomes for the same slot or argument index
// if they have different homes at different points in the function.
//
// Parameters:
// ppEnum - OUT: returns the enum of variable homes.
//
// Returns:
// HRESULT for success or failure.
//-----------------------------------------------------------------------------
HRESULT CordbNativeCode::EnumerateVariableHomes(ICorDebugVariableHomeEnum **ppEnum)
{
PUBLIC_REENTRANT_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
VALIDATE_POINTER_TO_OBJECT(ppEnum, ICorDebugVariableHomeEnum **);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
// Get the argument count
ULONG argCount = 0;
CordbFunction *func = GetFunction();
_ASSERTE(func != NULL);
IfFailRet(func->GetSig(NULL, &argCount, NULL));
#ifdef _DEBUG
// Get the number of locals
ULONG localCount = 0;
EX_TRY
{
GetFunction()->GetILCode()->GetLocalVarSig(NULL, &localCount);
}
EX_CATCH_HRESULT(hr);
IfFailRet(hr);
#endif
RSSmartPtr<CordbVariableHome> *rsHomes = NULL;
EX_TRY
{
CordbProcess *pProcess = GetProcess();
_ASSERTE(pProcess != NULL);
const DacDbiArrayList<ICorDebugInfo::NativeVarInfo> *pOffsetInfoList = m_nativeVarData.GetOffsetInfoList();
_ASSERTE(pOffsetInfoList != NULL);
DWORD countHomes = 0;
for (unsigned int i = 0; i < pOffsetInfoList->Count(); i++)
{
const ICorDebugInfo::NativeVarInfo *pNativeVarInfo = &((*pOffsetInfoList)[i]);
_ASSERTE(pNativeVarInfo != NULL);
// The variable information list can include variables
// with special varNumbers representing, for instance, the
// parameter types for generic methods. Here we are only
// interested in local variables and arguments.
if (pNativeVarInfo->varNumber < (DWORD)ICorDebugInfo::MAX_ILNUM)
{
countHomes++;
}
}
rsHomes = new RSSmartPtr<CordbVariableHome>[countHomes];
DWORD varHomeInd = 0;
for (unsigned int i = 0; i < pOffsetInfoList->Count(); i++)
{
const ICorDebugInfo::NativeVarInfo *pNativeVarInfo = &((*pOffsetInfoList)[i]);
// Again, only look for native var info representing local
// variables and arguments.
if (pNativeVarInfo->varNumber < (DWORD)ICorDebugInfo::MAX_ILNUM)
{
// determine whether this variable home represents an argument or local variable
BOOL isLocal = ((ULONG)pNativeVarInfo->varNumber >= argCount);
// determine the argument-index or slot-index of this variable home
ULONG argOrSlotIndex;
if (isLocal) {
argOrSlotIndex = pNativeVarInfo->varNumber - argCount;
_ASSERTE(argOrSlotIndex < localCount);
} else {
argOrSlotIndex = pNativeVarInfo->varNumber;
}
RSInitHolder<CordbVariableHome> pCVH(new CordbVariableHome(this,
(*pOffsetInfoList)[i],
isLocal,
argOrSlotIndex));
pProcess->GetContinueNeuterList()->Add(pProcess, pCVH);
_ASSERTE(varHomeInd < countHomes);
rsHomes[varHomeInd].Assign(pCVH);
pCVH.ClearAndMarkDontNeuter();
varHomeInd++;
}
}
RSInitHolder<CordbVariableHomeEnumerator> pCDVHE(
new CordbVariableHomeEnumerator(GetProcess(), &rsHomes, countHomes));
pProcess->GetContinueNeuterList()->Add(pProcess, pCDVHE);
pCDVHE.TransferOwnershipExternal(ppEnum);
}
EX_CATCH_HRESULT(hr);
return hr;
}
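//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): draining the enumerator built above,
// assuming the standard ICorDebug enumerator shape for
// ICorDebugVariableHomeEnum::Next. "pCode4" is a hypothetical ICorDebugCode4
// pointer.
//
// RSExtSmartPtr<ICorDebugVariableHomeEnum> pEnum;
// IfFailRet(pCode4->EnumerateVariableHomes(&pEnum));
// ICorDebugVariableHome * pHome = NULL;
// ULONG cFetched = 0;
// while (SUCCEEDED(pEnum->Next(1, &pHome, &cFetched)) && (cFetched == 1))
// {
// // inspect pHome via the CordbVariableHome methods shown earlier
// // (GetLocationType / GetRegister / GetOffset)
// pHome->Release();
// }
//-----------------------------------------------------------------------------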
int CordbNativeCode::GetCallInstructionLength(BYTE *ip, ULONG32 count)
{
#if defined(TARGET_ARM)
if (Is32BitInstruction(*(WORD*)ip))
return 4;
else
return 2;
#elif defined(TARGET_ARM64)
return MAX_INSTRUCTION_LENGTH;
#elif defined(TARGET_X86)
if (count < 2)
return -1;
// Skip instruction prefixes
do
{
switch (*ip)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf1:
case 0xf2: // REPNE/REPNZ
case 0xf3:
ip++;
count--;
continue;
default:
break;
}
} while (0);
// Read the opcode
BYTE opcode = *ip++;
if (opcode == 0xcc)
{
// todo: Can we actually get this result? Doesn't ICorDebug hand out un-patched assembly?
_ASSERTE(!"Hit break opcode!");
return -1;
}
// Analyze what we can of the opcode
switch (opcode)
{
case 0xff:
{
// Count may have been decremented by prefixes.
if (count < 2)
return -1;
BYTE modrm = *ip++;
BYTE mod = (modrm & 0xC0) >> 6;
BYTE reg = (modrm & 0x38) >> 3;
BYTE rm = (modrm & 0x07);
int displace = -1;
if ((reg != 2) && (reg != 3) && (reg != 4) && (reg != 5))
{
//
// This is not a CALL or JMP instruction, return, unknown.
//
_ASSERTE(!"Unhandled opcode!");
return -1;
}
// Only try to decode registers if we actually have reg sets.
switch (mod)
{
case 0:
case 1:
case 2:
if (rm == 4)
{
if (count < 3)
return -1;
//
// Get values from the SIB byte
//
BYTE ss = (*ip & 0xC0) >> 6;
BYTE index = (*ip & 0x38) >> 3;
BYTE base = (*ip & 0x7);
//
// Finally add in the offset
//
if (mod == 0)
{
if (base == 5)
displace = 7;
else
displace = 3;
}
else if (mod == 1)
{
displace = 4;
}
else
{
displace = 7;
}
}
else
{
if (mod == 0)
{
if (rm == 5)
displace = 6;
else
displace = 2;
}
else if (mod == 1)
{
displace = 3;
}
else
{
displace = 6;
}
}
break;
case 3:
default:
displace = 2;
break;
}
return displace;
} // end of 0xFF case
case 0xe8:
return 5;
default:
break;
}
_ASSERTE(!"Unhandled opcode!");
return -1;
#elif defined(TARGET_AMD64)
BYTE rex = NULL;
BYTE prefix = *ip;
BOOL fContainsPrefix = FALSE;
// Should not happen.
if (prefix == 0xcc)
return -1;
// Skip instruction prefixes
//@TODO by euzem:
//This "loop" can't be really executed more than once so if CALL can really have more than one prefix we'll crash.
//Some of these prefixes are not allowed for CALL instruction and we should treat them as invalid code.
//It appears that this code was mostly copy/pasted from \NDP\clr\src\Debug\EE\amd64\amd64walker.cpp
//with very minimum fixes.
do
{
switch (prefix)
{
// Segment overrides
case 0x26: // ES
case 0x2E: // CS
case 0x36: // SS
case 0x3E: // DS
case 0x64: // FS
case 0x65: // GS
// Size overrides
case 0x66: // Operand-Size
case 0x67: // Address-Size
// Lock
case 0xf0:
// String REP prefixes
case 0xf2: // REPNE/REPNZ
case 0xf3:
ip++;
fContainsPrefix = TRUE;
continue;
// REX register extension prefixes
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x44:
case 0x45:
case 0x46:
case 0x47:
case 0x48:
case 0x49:
case 0x4a:
case 0x4b:
case 0x4c:
case 0x4d:
case 0x4e:
case 0x4f:
// make sure to set rex to prefix, not *ip because *ip still represents the
// codestream which has a 0xcc in it.
rex = prefix;
ip++;
fContainsPrefix = TRUE;
continue;
default:
break;
}
} while (0);
// Read the opcode
BYTE opcode = *ip++;
// Should not happen.
if (opcode == 0xcc)
return -1;
// Setup rex bits if needed
BYTE rex_b = 0;
BYTE rex_x = 0;
BYTE rex_r = 0;
if (rex != NULL)
{
rex_b = (rex & 0x1); // high bit to modrm r/m field or SIB base field or OPCODE reg field -- Hmm, when which?
rex_x = (rex & 0x2) >> 1; // high bit to sib index field
rex_r = (rex & 0x4) >> 2; // high bit to modrm reg field
}
// Analyze what we can of the opcode
switch (opcode)
{
case 0xff:
{
BYTE modrm = *ip++;
_ASSERT(modrm != NULL);
BYTE mod = (modrm & 0xC0) >> 6;
BYTE reg = (modrm & 0x38) >> 3;
BYTE rm = (modrm & 0x07);
reg |= (rex_r << 3);
rm |= (rex_b << 3);
if ((reg < 2) || (reg > 5 && reg < 8) || (reg > 15)) {
// not a valid register for a CALL or BRANCH
_ASSERTE(!"Invalid opcode!");
return -1;
}
SHORT displace = -1;
// See: Tables A-15,16,17 in AMD Dev Manual 3 for information
// about how the ModRM/SIB/REX bytes interact.
switch (mod)
{
case 0:
case 1:
case 2:
if ((rm & 0x07) == 4) // we have an SIB byte following
{
//
// Get values from the SIB byte
//
BYTE sib = *ip;
_ASSERT(sib != NULL);
BYTE base = (sib & 0x07);
base |= (rex_b << 3);
ip++;
//
// Finally add in the offset
//
if (mod == 0)
{
if ((base & 0x07) == 5)
displace = 7;
else
displace = 3;
}
else if (mod == 1)
{
displace = 4;
}
else // mod == 2
{
displace = 7;
}
}
else
{
//
// Get the value we need from the register.
//
// Check for RIP-relative addressing mode.
if ((mod == 0) && ((rm & 0x07) == 5))
{
displace = 6; // 1 byte opcode + 1 byte modrm + 4 byte displacement (signed)
}
else
{
if (mod == 0)
displace = 2;
else if (mod == 1)
displace = 3;
else // mod == 2
displace = 6;
}
}
break;
case 3:
default:
displace = 2;
}
// Displace should be set by one of the cases above
if (displace == -1)
{
_ASSERTE(!"GetCallInstructionLength() encountered unexpected call instruction");
return -1;
}
// Account for the 1 byte prefix (REX or otherwise)
if (fContainsPrefix)
displace++;
// reg == 4 or 5 means that it is not a CALL, but JMP instruction
// so we will fall back to ASSERT after break
if ((reg != 4) && (reg != 5))
return displace;
break;
}
case 0xe8:
{
//Near call with the target specified by a 32-bit relative displacement.
//[maybe 1 byte prefix] + [1 byte opcode E8h] + [4 bytes offset]
return 5 + (fContainsPrefix ? 1 : 0);
}
default:
break;
}
_ASSERTE(!"Invalid opcode!");
return -1;
#else
#error Platform not implemented
#endif
}
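//-----------------------------------------------------------------------------
// Worked examples (illustrative only): lengths the decoder above is expected
// to report for a few common CALL encodings. The byte patterns below are
// generic CALL forms chosen for illustration, not data from the product code.
//
// x86: E8 xx xx xx xx (call rel32) -> 5
// x86: FF D0 (call eax) -> 2 (mod == 3 path)
// x86: FF 15 xx xx xx xx (call [disp32]) -> 6 (mod == 0, rm == 5)
// AMD64: FF 15 xx xx xx xx (call [rip+disp32]) -> 6 (RIP-relative path)
// AMD64: 66 E8 xx xx xx xx (prefixed call rel32) -> 6 (5 plus 1 prefix byte)
//-----------------------------------------------------------------------------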
HRESULT CordbNativeCode::GetSigParserFromFunction(mdToken mdFunction, mdToken *pClass, SigParser &parser, SigParser &methodGenerics)
{
// mdFunction may be a MemberRef, a MethodDef, or a MethodSpec. We must handle all three cases.
HRESULT hr = S_OK;
IMetaDataImport* pImport = m_pFunction->GetModule()->GetMetaDataImporter();
RSExtSmartPtr<IMetaDataImport2> pImport2;
IfFailRet(pImport->QueryInterface(IID_IMetaDataImport2, (void**)&pImport2));
if (TypeFromToken(mdFunction) == mdtMemberRef)
{
PCCOR_SIGNATURE sig = 0;
ULONG sigSize = 0;
IfFailRet(pImport->GetMemberRefProps(mdFunction, pClass, NULL, 0, 0, &sig, &sigSize));
parser = SigParser(sig, sigSize);
}
else if (TypeFromToken(mdFunction) == mdtMethodDef)
{
PCCOR_SIGNATURE sig = 0;
ULONG sigSize = 0;
IfFailRet(pImport->GetMethodProps(mdFunction, pClass, NULL, 0, NULL, NULL, &sig, &sigSize, NULL, NULL));
parser = SigParser(sig, sigSize);
}
else if (TypeFromToken(mdFunction) == mdtMethodSpec)
{
// For a method spec, we use GetMethodSpecProps to get the generic signature and the parent token
// (which is a MethodDef token). We'll recurse to get the other properties from the parent token.
PCCOR_SIGNATURE sig = 0;
ULONG sigSize = 0;
mdToken parentToken = 0;
IfFailRet(pImport2->GetMethodSpecProps(mdFunction, &parentToken, &sig, &sigSize));
methodGenerics = SigParser(sig, sigSize);
if (pClass)
*pClass = parentToken;
return GetSigParserFromFunction(parentToken, pClass, parser, methodGenerics);
}
else
{
// According to ECMA III.3.19, this can never happen.
return E_UNEXPECTED;
}
return S_OK;
}
HRESULT CordbNativeCode::EnsureReturnValueAllowed(Instantiation *currentInstantiation, mdToken targetClass, SigParser &parser, SigParser &methodGenerics)
{
HRESULT hr = S_OK;
uint32_t genCount = 0;
IfFailRet(SkipToReturn(parser, &genCount));
return EnsureReturnValueAllowedWorker(currentInstantiation, targetClass, parser, methodGenerics, genCount);
}
HRESULT CordbNativeCode::EnsureReturnValueAllowedWorker(Instantiation *currentInstantiation, mdToken targetClass, SigParser &parser, SigParser &methodGenerics, ULONG genCount)
{
// There are a few considerations here:
// 1. Generic instantiations. This is a "Foo<T>", and we need to check if that "Foo"
// fits one of the categories we disallow (such as a struct).
// 2. Void return.
// 3. ValueType - Unsupported this release.
// 4. MVAR - Method generics. We need to get the actual generic type and recursively
// check if we allow that.
// 5. VAR - Class generics. We need to get the actual generic type and recurse.
SigParser original(parser);
HRESULT hr = S_OK;
CorElementType returnType;
IfFailRet(parser.GetElemType(&returnType));
if (returnType == ELEMENT_TYPE_GENERICINST)
{
IfFailRet(parser.GetElemType(&returnType));
if (returnType == ELEMENT_TYPE_CLASS)
return S_OK;
if (returnType != ELEMENT_TYPE_VALUETYPE)
return META_E_BAD_SIGNATURE;
if (currentInstantiation == NULL)
return S_OK; // We will check again when we have the instantiation.
NewArrayHolder<CordbType*> types;
Instantiation inst;
IfFailRet(CordbJITILFrame::BuildInstantiationForCallsite(GetModule(), types, inst, currentInstantiation, targetClass, SigParser(methodGenerics)));
CordbType *pType = 0;
IfFailRet(CordbType::SigToType(GetModule(), &original, &inst, &pType));
IfFailRet(pType->ReturnedByValue());
if (hr == S_OK) // not S_FALSE
return S_OK;
return CORDBG_E_UNSUPPORTED;
}
if (returnType == ELEMENT_TYPE_VALUETYPE)
{
Instantiation inst;
CordbType *pType = 0;
IfFailRet(CordbType::SigToType(GetModule(), &original, &inst, &pType));
IfFailRet(pType->ReturnedByValue());
if (hr == S_OK) // not S_FALSE
return S_OK;
return CORDBG_E_UNSUPPORTED;
}
if (returnType == ELEMENT_TYPE_TYPEDBYREF)
return CORDBG_E_UNSUPPORTED;
if (returnType == ELEMENT_TYPE_VOID)
return E_UNEXPECTED;
if (returnType == ELEMENT_TYPE_MVAR)
{
// Get which generic parameter is referenced.
uint32_t genParam = 0;
IfFailRet(parser.GetData(&genParam));
// Grab the calling convention of the method, ensure it's GENERICINST.
uint32_t callingConv = 0;
IfFailRet(methodGenerics.GetCallingConvInfo(&callingConv));
if (callingConv != IMAGE_CEE_CS_CALLCONV_GENERICINST)
return META_E_BAD_SIGNATURE;
// Ensure sensible bounds.
SigParser generics(methodGenerics); // Make a copy since operations are destructive.
uint32_t maxCount = 0;
IfFailRet(generics.GetData(&maxCount));
if (maxCount <= genParam || genParam > 1024)
return META_E_BAD_SIGNATURE;
// Walk to the parameter referenced.
while (genParam--)
IfFailRet(generics.SkipExactlyOne());
// Now recurse with "generics" at the location to continue parsing.
return EnsureReturnValueAllowedWorker(currentInstantiation, targetClass, generics, methodGenerics, genCount);
}
if (returnType == ELEMENT_TYPE_VAR)
{
// Get which type parameter is referenced.
uint32_t typeParam = 0;
parser.GetData(&typeParam);
// Ensure something reasonable.
if (typeParam > 1024)
return META_E_BAD_SIGNATURE;
// Lookup the containing class's signature so we can get the referenced generic parameter.
IMetaDataImport *pImport = m_pFunction->GetModule()->GetMetaDataImporter();
PCCOR_SIGNATURE sig;
ULONG countSig;
IfFailRet(pImport->GetTypeSpecFromToken(targetClass, &sig, &countSig));
// Ensure the type's typespec is GENERICINST.
SigParser typeParser(sig, countSig);
CorElementType et;
IfFailRet(typeParser.GetElemType(&et));
if (et != ELEMENT_TYPE_GENERICINST)
return META_E_BAD_SIGNATURE;
// Move to the correct location.
IfFailRet(typeParser.GetElemType(&et));
if (et != ELEMENT_TYPE_VALUETYPE && et != ELEMENT_TYPE_CLASS)
return META_E_BAD_SIGNATURE;
IfFailRet(typeParser.GetToken(NULL));
uint32_t totalTypeCount = 0;
IfFailRet(typeParser.GetData(&totalTypeCount));
if (totalTypeCount < typeParam)
return META_E_BAD_SIGNATURE;
while (typeParam--)
IfFailRet(typeParser.SkipExactlyOne());
// This is a temporary workaround for an infinite recursion here. ALL of this code will
// go away when we allow struct return values, but in the meantime this avoids a corner
// case in the type system we haven't solved yet.
IfFailRet(typeParser.PeekElemType(&et));
if (et == ELEMENT_TYPE_VAR)
return E_FAIL;
// Now that typeParser is at the location of the correct generic parameter, recurse.
return EnsureReturnValueAllowedWorker(currentInstantiation, targetClass, typeParser, methodGenerics, genCount);
}
// Everything else supported
return S_OK;
}
HRESULT CordbNativeCode::SkipToReturn(SigParser &parser, uint32_t *genCount)
{
// Takes a method signature parser (at the beginning of a signature) and skips to the
// return value.
HRESULT hr = S_OK;
// Skip calling convention
uint32_t uCallConv;
IfFailRet(parser.GetCallingConvInfo(&uCallConv));
if ((uCallConv == IMAGE_CEE_CS_CALLCONV_FIELD) || (uCallConv == IMAGE_CEE_CS_CALLCONV_LOCAL_SIG))
return META_E_BAD_SIGNATURE;
// Skip type parameter count if function is generic
if (uCallConv & IMAGE_CEE_CS_CALLCONV_GENERIC)
IfFailRet(parser.GetData(genCount));
// Skip argument count
IfFailRet(parser.GetData(NULL));
return S_OK;
}
HRESULT CordbNativeCode::GetCallSignature(ULONG32 ILoffset, mdToken *pClass, mdToken *pFunction, SigParser &parser, SigParser &generics)
{
// check if specified IL offset is at a call instruction
CordbILCode *pCode = this->m_pFunction->GetILCode();
BYTE buffer[3];
ULONG32 fetched = 0;
HRESULT hr = pCode->GetCode(ILoffset, ILoffset+ARRAY_SIZE(buffer), ARRAY_SIZE(buffer), buffer, &fetched);
if (FAILED(hr))
return hr;
else if (fetched != ARRAY_SIZE(buffer))
return CORDBG_E_INVALID_OPCODE;
// tail. - fe 14 (ECMA III.2.4)
BYTE instruction = buffer[0];
if (buffer[0] == 0xfe && buffer[1] == 0x14)
{
// tail call case. We don't allow managed return values for tailcalls.
return CORDBG_E_INVALID_OPCODE;
}
// call - 28 (ECMA III.3.19)
// callvirt - 6f (ECMA III.4.2)
if (instruction != 0x28 && instruction != 0x6f)
return CORDBG_E_INVALID_OPCODE;
// Now grab the MD token of the call
mdToken mdFunction = 0;
const ULONG32 offset = ILoffset + 1;
hr = pCode->GetCode(offset, offset+sizeof(mdToken), sizeof(mdToken), (BYTE*)&mdFunction, &fetched);
if (FAILED(hr) || fetched != sizeof(mdToken))
return CORDBG_E_INVALID_OPCODE;
if (pFunction)
*pFunction = mdFunction;
// Convert to a signature parser
return GetSigParserFromFunction(mdFunction, pClass, parser, generics);
}
HRESULT CordbNativeCode::GetReturnValueLiveOffsetImpl(Instantiation *currentInstantiation, ULONG32 ILoffset, ULONG32 bufferSize, ULONG32 *pFetched, ULONG32 *pOffsets)
{
if (pFetched == NULL)
return E_INVALIDARG;
HRESULT hr = S_OK;
ULONG32 found = 0;
// verify that the call target actually returns something we allow
SigParser signature, generics;
mdToken mdClass = 0;
IfFailRet(GetCallSignature(ILoffset, &mdClass, NULL, signature, generics));
IfFailRet(EnsureReturnValueAllowed(currentInstantiation, mdClass, signature, generics));
// now find the native offset
SequencePoints *pSP = GetSequencePoints();
DebuggerILToNativeMap *pMap = pSP->GetCallsiteMapAddr();
for (ULONG32 i = 0; i < pSP->GetCallsiteEntryCount() && pMap; ++i, pMap++)
{
if (pMap->ilOffset == ILoffset && (pMap->source & ICorDebugInfo::CALL_INSTRUCTION) == ICorDebugInfo::CALL_INSTRUCTION)
{
// if we have a buffer, fill it in.
if (pOffsets && found < bufferSize)
{
// Fetch the actual assembly instructions
BYTE nativeBuffer[8];
ULONG32 fetched = 0;
IfFailRet(GetCode(pMap->nativeStartOffset, pMap->nativeStartOffset+ARRAY_SIZE(nativeBuffer), ARRAY_SIZE(nativeBuffer), nativeBuffer, &fetched));
int skipBytes = 0;
#if defined(PSEUDORANDOM_NOP_INSERTION)
// Skip nop sleds the JIT adds. These instructions are added as a security measure,
// and they cause the wrong offset of the call instruction to be reported to us.
const BYTE nop_opcode = 0x90;
while (fetched && nativeBuffer[0] == nop_opcode)
{
skipBytes++;
for (int j = 1; j < ARRAY_SIZE(nativeBuffer) && nativeBuffer[j] == nop_opcode; ++j)
skipBytes++;
// We must have at least one skip byte since the outer while ensures it. Thus we always need to reread
// the buffer at the end of this loop.
IfFailRet(GetCode(pMap->nativeStartOffset+skipBytes, pMap->nativeStartOffset+skipBytes+ARRAY_SIZE(nativeBuffer), ARRAY_SIZE(nativeBuffer), nativeBuffer, &fetched));
}
#endif
// Get the length of the call instruction.
int offset = GetCallInstructionLength(nativeBuffer, fetched);
if (offset == -1)
return E_UNEXPECTED; // Could not decode instruction, this should never happen.
pOffsets[found] = pMap->nativeStartOffset + offset + skipBytes;
}
found++;
}
}
if (pOffsets)
*pFetched = found < bufferSize ? found : bufferSize;
else
*pFetched = found;
if (found == 0)
return E_FAIL;
if (pOffsets && found > bufferSize)
return S_FALSE;
return S_OK;
}
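//-----------------------------------------------------------------------------
// Usage sketch (illustrative only): GetReturnValueLiveOffset uses the same
// count-then-fill pattern. "pCode3" is a hypothetical ICorDebugCode3 pointer
// and "ilOffset" must sit on a call/callvirt instruction (see GetCallSignature
// above); error-path cleanup is omitted for brevity.
//
// ULONG32 cOffsets = 0;
// IfFailRet(pCode3->GetReturnValueLiveOffset(ilOffset, 0, &cOffsets, NULL));
// ULONG32 * rgOffsets = new (nothrow) ULONG32[cOffsets];
// if (rgOffsets == NULL)
// return E_OUTOFMEMORY;
// IfFailRet(pCode3->GetReturnValueLiveOffset(ilOffset, cOffsets, &cOffsets, rgOffsets));
// // each entry is a native offset at which the return value is readable
//-----------------------------------------------------------------------------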
//-----------------------------------------------------------------------------
// Creates a CordbNativeCode (if it's not already created) and adds it to the
// hash table of CordbNativeCode instances belonging to this module.
// Used by CordbFunction::InitNativeCodeInfo.
//
// Arguments:
// Input:
// methodToken - the methodDef token of the function this native code belongs to
// methodDesc - the methodDesc for the jitted method
// startAddress - the hot code startAddress for this method
// Return value:
// found or created CordbNativeCode pointer
// Assumptions: methodToken is in the metadata for this module
// methodDesc and startAddress should be consistent for
// a jitted instance of methodToken's method
//-----------------------------------------------------------------------------
CordbNativeCode * CordbModule::LookupOrCreateNativeCode(mdMethodDef methodToken,
VMPTR_MethodDesc methodDesc,
CORDB_ADDRESS startAddress)
{
INTERNAL_SYNC_API_ENTRY(GetProcess());
_ASSERTE(startAddress != NULL);
_ASSERTE(methodDesc != VMPTR_MethodDesc::NullPtr());
CordbNativeCode * pNativeCode = NULL;
NativeCodeFunctionData codeInfo;
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
// see if we already have this--if not, we'll make an instance, otherwise we'll just return the one we have.
pNativeCode = m_nativeCodeTable.GetBase((UINT_PTR) startAddress);
if (pNativeCode == NULL)
{
GetProcess()->GetDAC()->GetNativeCodeInfoForAddr(methodDesc, startAddress, &codeInfo);
// We didn't have an instance, so we'll build one and add it to the hash table
LOG((LF_CORDB,
LL_INFO10000,
"R:CT::RSCreating code w/ ver:0x%x, md:0x%x, nativeStart=0x%08x, nativeSize=0x%08x\n",
codeInfo.encVersion,
VmPtrToCookie(codeInfo.vmNativeCodeMethodDescToken),
codeInfo.m_rgCodeRegions[kHot].pAddress,
codeInfo.m_rgCodeRegions[kHot].cbSize));
// Lookup the function object that this code should be bound to
CordbFunction* pFunction = CordbModule::LookupOrCreateFunction(methodToken, codeInfo.encVersion);
_ASSERTE(pFunction != NULL);
// There are bugs with the on-demand class load performed by CordbFunction in some cases. The old stack
// tracing code avoided them by eagerly loading the parent class so I am following suit
pFunction->InitParentClassOfFunction();
// First, create a new CordbNativeCode instance--we'll need this to make the CordbJITInfo instance
pNativeCode = new (nothrow)CordbNativeCode(pFunction, &codeInfo, codeInfo.isInstantiatedGeneric != 0);
_ASSERTE(pNativeCode != NULL);
m_nativeCodeTable.AddBaseOrThrow(pNativeCode);
}
return pNativeCode;
} // CordbModule::LookupOrCreateNativeCode
// LoadNativeInfo loads from the left side any native variable info
// from the JIT.
//
void CordbNativeCode::LoadNativeInfo()
{
THROW_IF_NEUTERED(this);
INTERNAL_API_ENTRY(this->GetProcess());
// If we've either never done this before (no info), or we have, but the version number has increased, we
// should try and get a newer version of our JIT info.
if(m_nativeVarData.IsInitialized())
{
return;
}
// You can't do this if the function is implemented as part of the Runtime.
if (GetFunction()->IsNativeImpl() == CordbFunction::kNativeOnly)
{
ThrowHR(CORDBG_E_FUNCTION_NOT_IL);
}
CordbProcess *pProcess = GetProcess();
// Get everything via the DAC
if (m_fCodeAvailable)
{
RSLockHolder lockHolder(pProcess->GetProcessLock());
pProcess->GetDAC()->GetNativeCodeSequencePointsAndVarInfo(GetVMNativeCodeMethodDescToken(),
GetAddress(),
m_fCodeAvailable,
&m_nativeVarData,
&m_sequencePoints);
}
} // CordbNativeCode::LoadNativeInfo
| -1 |
dotnet/runtime | 66,257 | Cloning improvements | Remove loop cloning variable initialization condition:
Assume that any pre-existing initialization is acceptable.
Check condition against zero if necessary. Const inits remain as before.
Lots of diffs due to more cloning for cases of `for (i = expression...`
where `expression` is not just a constant or local var.
Also, fix various comments that were no longer correct (e.g., "first" block
concept is gone) | BruceForstall | 2022-03-06T02:30:47Z | 2022-03-21T16:36:12Z | a1f26fbc29e645eda585fd3a4093311101521855 | a1bf79e06b64edef0957a9cc907180c25fa8aab9 | Cloning improvements. Remove loop cloning variable initialization condition:
Assume that any pre-existing initialization is acceptable.
Check condition against zero if necessary. Const inits remain as before.
Lots of diffs due to more cloning for cases of `for (i = expression...`
where `expression` is not just a constant or local var.
Also, fix various comments that were no longer correct (e.g., "first" block
concept is gone) | ./src/libraries/Common/src/Microsoft/Win32/SafeHandles/SafeEventStreamHandle.OSX.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
namespace Microsoft.Win32.SafeHandles
{
/// <summary>
/// This class is a wrapper around the EventStream and Create pattern.
/// Usually, the Create pattern has the caller call Create* to allocate
/// and CFRelease to free; however, FSEventStream has its own release
/// function, so we need to extend the pattern to account for that.
/// </summary>
internal sealed partial class SafeEventStreamHandle : SafeHandle
{
public SafeEventStreamHandle() : base(IntPtr.Zero, true) { }
internal SafeEventStreamHandle(IntPtr ptr) : base(IntPtr.Zero, true)
{
this.SetHandle(ptr);
}
protected override bool ReleaseHandle()
{
Interop.EventStream.FSEventStreamStop(handle);
Interop.EventStream.FSEventStreamInvalidate(handle);
Interop.EventStream.FSEventStreamRelease(handle);
return true;
}
public override bool IsInvalid
{
get
{
return handle == IntPtr.Zero;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
namespace Microsoft.Win32.SafeHandles
{
/// <summary>
/// This class is a wrapper around the EventStream and Create pattern.
/// Usually, the Create pattern has the caller call Create* to allocate
/// and CFRelease to free; however, FSEventStream has its own release
/// function, so we need to extend the pattern to account for that.
/// </summary>
internal sealed partial class SafeEventStreamHandle : SafeHandle
{
public SafeEventStreamHandle() : base(IntPtr.Zero, true) { }
internal SafeEventStreamHandle(IntPtr ptr) : base(IntPtr.Zero, true)
{
this.SetHandle(ptr);
}
protected override bool ReleaseHandle()
{
Interop.EventStream.FSEventStreamStop(handle);
Interop.EventStream.FSEventStreamInvalidate(handle);
Interop.EventStream.FSEventStreamRelease(handle);
return true;
}
public override bool IsInvalid
{
get
{
return handle == IntPtr.Zero;
}
}
}
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/jit/gentree.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#include "hwintrinsic.h"
#include "simd.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/
const unsigned char GenTree::gtOperKindTable[] = {
#define GTNODE(en, st, cm, ok) ((ok)&GTK_MASK) + GTK_COMMUTE *cm,
#include "gtlist.h"
};
#ifdef DEBUG
const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = {
#define GTNODE(en, st, cm, ok) static_cast<GenTreeDebugOperKind>((ok)&DBK_MASK),
#include "gtlist.h"
};
#endif // DEBUG
/*****************************************************************************
*
* The types of different GenTree nodes
*/
#ifdef DEBUG
#define INDENT_SIZE 3
//--------------------------------------------
//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indentation and the printing of arcs.
//
// Notes:
// The mode of printing is set in the Constructor, using its 'compiler' argument.
// Currently it only prints arcs when fgOrder == fgOrderLinear.
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
enum IndentChars
{
ICVertical,
ICBottom,
ICTop,
ICMiddle,
ICDash,
ICTerminal,
ICError,
IndentCharCount
};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
static const char* emptyIndents[IndentCharCount] = { " ", " ", " ", " ", " ", "", "?" };
static const char* asciiIndents[IndentCharCount] = { "|", "\\", "/", "+", "-", "*", "?" };
static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x94\x94", "\xe2\x94\x8c", "\xe2\x94\x9c", "\xe2\x94\x80", "\xe2\x96\x8c", "?" };
// clang-format on
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
IndentInfoStack stack;
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
indents = asciiIndents;
}
else
{
indents = unicodeIndents;
}
}
// Return the depth of the current indentation.
unsigned Depth()
{
return stack.Height();
}
// Push a new indentation onto the stack, of the given type.
void Push(Compiler::IndentInfo info)
{
stack.Push(info);
}
// Pop the most recent indentation type off the stack.
Compiler::IndentInfo Pop()
{
return stack.Pop();
}
// Print the current indentation and arcs.
void print()
{
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
unsigned index = indentCount - 1 - i;
switch (stack.Top(index))
{
case Compiler::IndentInfo::IINone:
printf(" ");
break;
case Compiler::IndentInfo::IIArc:
if (index == 0)
{
printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
}
else
{
printf("%s ", indents[ICVertical]);
}
break;
case Compiler::IndentInfo::IIArcBottom:
printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIArcTop:
printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIError:
printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
break;
default:
unreached();
}
}
printf("%s", indents[ICTerminal]);
}
};
//------------------------------------------------------------------------
// printIndent: This is a static method which simply invokes the 'print'
// method on its 'indentStack' argument.
//
// Arguments:
// indentStack - specifies the information for the indentation & arcs to be printed
//
// Notes:
// This method exists to localize the checking for the case where indentStack is null.
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
{
return;
}
indentStack->print();
}
#endif
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* opNames[] = {
#define GTNODE(en, st, cm, ok) #en,
#include "gtlist.h"
};
const char* GenTree::OpName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opNames));
return opNames[op];
}
#endif
#if MEASURE_NODE_SIZE
static const char* opStructNames[] = {
#define GTNODE(en, st, cm, ok) #st,
#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opStructNames));
return opStructNames[op];
}
#endif
//
// We allocate tree nodes in 2 different sizes:
// - TREE_NODE_SZ_SMALL for most nodes
// - TREE_NODE_SZ_LARGE for the few nodes (such as calls) that have
// more fields and take up a lot more space.
//
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
#define GTNODE(en, st, cm, ok) sizeof(st),
#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
unsigned GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
/* static */
void GenTree::InitNodeSize()
{
/* Set all sizes to 'small' first */
for (unsigned op = 0; op <= GT_COUNT; op++)
{
GenTree::s_gtNodeSizes[op] = TREE_NODE_SZ_SMALL;
}
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
if (GlobalJitOptions::compFeatureHfa
#if defined(UNIX_AMD64_ABI)
|| true
#endif // defined(UNIX_AMD64_ABI)
)
{
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
}
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOUNDS_CHECK] = TREE_NODE_SZ_SMALL;
GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#if FEATURE_ARG_SPLIT
GenTree::s_gtNodeSizes[GT_PUTARG_SPLIT] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]);
// This list of assertions should come to contain all GenTree subtypes that are declared
// "small".
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCC) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFieldList) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndexAddr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_PUT_STRUCT_ARG_STK
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
#else // FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#if FEATURE_ARG_SPLIT
static_assert_no_msg(sizeof(GenTreePutArgSplit) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
#ifdef FEATURE_SIMD
static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static_assert_no_msg(sizeof(GenTreeHWIntrinsic) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_HW_INTRINSICS
// clang-format on
}
size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
bool GenTree::IsNodeProperlySized() const
{
size_t size;
if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
}
return GenTree::s_gtNodeSizes[gtOper] <= size;
}
#endif
//------------------------------------------------------------------------
// ReplaceWith: replace this with the src node. The source must be an isolated node
// and cannot be used after the replacement.
//
// Arguments:
// src - source tree, that replaces this.
// comp - the compiler instance to transfer annotations for arrays.
//
void GenTree::ReplaceWith(GenTree* src, Compiler* comp)
{
// The source may be big only if the target is also a big node
assert((gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[src->gtOper] == TREE_NODE_SZ_SMALL);
// The check is effective only if nodes have been already threaded.
assert((src->gtPrev == nullptr) && (src->gtNext == nullptr));
RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled
GenTree* prev = gtPrev;
GenTree* next = gtNext;
// The VTable pointer is copied intentionally here
memcpy((void*)this, (void*)src, src->GetNodeSize());
this->gtPrev = prev;
this->gtNext = next;
#ifdef DEBUG
gtSeqNum = 0;
#endif
// Transfer any annotations.
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
DEBUG_DESTROY_NODE(src);
}
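//------------------------------------------------------------------------
// Example (illustrative sketch, not taken from the surrounding code): a
// typical use is overwriting a tree in place with a freshly allocated,
// unlinked replacement of a compatible size class, e.g. folding an int-typed
// expression to a constant. "comp" is the owning Compiler* and gtNewIconNode
// is the existing integer-constant constructor; "tree" is assumed to be an
// int-typed node that is safe to fold.
//
// GenTree* zero = comp->gtNewIconNode(0); // TYP_INT constant, a small node
// tree->ReplaceWith(zero, comp);
//------------------------------------------------------------------------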
/*****************************************************************************
*
* When 'NODEBASH_STATS' is enabled in "jit.h" we record all instances of
* an existing GenTree node having its operator changed. This can be useful
* for two (related) things - to see what is being bashed (and what isn't),
* and to verify that the existing choices for what nodes are marked 'large'
* are reasonable (to minimize "wasted" space).
*
* And yes, the hash function / logic is simplistic, but it is conflict-free
* and transparent for what we need.
*/
#if NODEBASH_STATS
#define BASH_HASH_SIZE 211
inline unsigned hashme(genTreeOps op1, genTreeOps op2)
{
return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
}
struct BashHashDsc
{
unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
unsigned __int32 bhCount; // the same old->new bashings seen so far
unsigned __int8 bhOperOld; // original gtOper
unsigned __int8 bhOperNew; // new gtOper
};
static BashHashDsc BashHash[BASH_HASH_SIZE];
void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
unsigned hash = hashme(operOld, operNew);
BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
noway_assert(desc->bhCount == 0); // if this ever fires, need to fix the hash fn
desc->bhFullHash = hash;
}
desc->bhCount += 1;
desc->bhOperOld = operOld;
desc->bhOperNew = operNew;
}
void GenTree::ReportOperBashing(FILE* f)
{
unsigned total = 0;
fflush(f);
fprintf(f, "\n");
fprintf(f, "Bashed gtOper stats:\n");
fprintf(f, "\n");
fprintf(f, " Old operator New operator #bytes old->new Count\n");
fprintf(f, " ---------------------------------------------------------------\n");
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
unsigned opOld = BashHash[h].bhOperOld;
unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
(s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
fprintf(f, "Total bashings: %u\n", total);
fprintf(f, "\n");
fflush(f);
}
#endif // NODEBASH_STATS
/*****************************************************************************/
#if MEASURE_NODE_SIZE
void GenTree::DumpNodeSizes(FILE* fp)
{
// Dump the sizes of the various GenTree flavors
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
// Verify that node sizes are set kosherly and dump sizes
for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
unsigned needSize = s_gtTrueSizes[op];
unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
const char* operName = OpName((genTreeOps)op);
bool repeated = false;
// Have we seen this struct flavor before?
for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
repeated = true;
break;
}
}
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
unsigned sizeChar = '?';
if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
}
else if (needSize <= TREE_NODE_SZ_SMALL && nodeSize == TREE_NODE_SZ_LARGE)
{
fprintf(fp, " ... could be small");
}
fprintf(fp, "\n");
}
}
}
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
*
* Walk all basic blocks and call the given function pointer for all tree
* nodes contained therein.
*/
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
}
}
//-----------------------------------------------------------
// CopyReg: Copy the _gtRegNum/gtRegTag fields.
//
// Arguments:
// from - GenTree node from which to copy
//
// Return Value:
// None
void GenTree::CopyReg(GenTree* from)
{
_gtRegNum = from->_gtRegNum;
INDEBUG(gtRegTag = from->gtRegTag;)
// Also copy multi-reg state if this is a call node
if (IsCall())
{
assert(from->IsCall());
this->AsCall()->CopyOtherRegs(from->AsCall());
}
else if (IsCopyOrReload())
{
this->AsCopyOrReload()->CopyOtherRegs(from->AsCopyOrReload());
}
}
//------------------------------------------------------------------
// gtHasReg: Return whether this node has been assigned a register by LSRA
//
// Arguments:
// comp - Compiler instance. Required for multi-reg lcl var; ignored otherwise.
//
// Return Value:
// Returns true if the node was assigned a register.
//
// A multi-reg call node is considered to have a reg only if regs are allocated for ALL of its
// return values.
// REVIEW: why is this ALL while the other cases are ANY? Explain.
//
// A GT_COPY or GT_RELOAD of a multi-reg call is considered to have a reg if it
// has a reg assigned to ANY of its positions.
//
// A multi-reg local var is considered to have a reg if it has a reg assigned for ANY
// of its positions.
//
bool GenTree::gtHasReg(Compiler* comp) const
{
bool hasReg = false;
if (IsMultiRegCall())
{
const GenTreeCall* call = AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A multi-reg call node is said to have regs if it has
// a reg assigned to each of its result registers.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (call->GetRegNumByIdx(i) != REG_NA);
if (!hasReg)
{
break;
}
}
}
else if (IsCopyOrReloadOfMultiRegCall())
{
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A multi-reg copy or reload node is said to have regs
// if it has valid regs in any of its positions.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (copyOrReload->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else if (IsMultiRegLclVar())
{
assert(comp != nullptr);
const GenTreeLclVar* lclNode = AsLclVar();
const unsigned regCount = GetMultiRegCount(comp);
// A multi-reg local var is said to have regs
// if it has valid regs in any of its positions.
for (unsigned i = 0; i < regCount; i++)
{
hasReg = (lclNode->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else
{
hasReg = (GetRegNum() != REG_NA);
}
return hasReg;
}
//-----------------------------------------------------------------------------
// GetRegisterDstCount: Get the number of registers defined by the node.
//
// Arguments:
// None
//
// Return Value:
// The number of registers that this node defines.
//
// Notes:
// This should not be called on a contained node.
// This does not look at the actual register assignments, if any, and so
// is valid after Lowering.
//
int GenTree::GetRegisterDstCount(Compiler* compiler) const
{
assert(!isContained());
if (!IsMultiRegNode())
{
return (IsValue()) ? 1 : 0;
}
else if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
else if (IsCopyOrReload())
{
return gtGetOp1()->GetRegisterDstCount(compiler);
}
#if FEATURE_ARG_SPLIT
else if (OperIsPutArgSplit())
{
return (const_cast<GenTree*>(this))->AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
else if (OperIsMultiRegOp())
{
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
assert(OperIs(GT_MUL_LONG));
return 2;
#endif
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (OperIsHWIntrinsic())
{
assert(TypeIs(TYP_STRUCT));
const GenTreeHWIntrinsic* intrinsic = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = intrinsic->GetHWIntrinsicId();
assert(HWIntrinsicInfo::IsMultiReg(intrinsicId));
return HWIntrinsicInfo::GetMultiRegCount(intrinsicId);
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetFieldCount(compiler);
}
assert(!"Unexpected multi-reg node");
return 0;
}
//-----------------------------------------------------------------------------------
// IsMultiRegNode: Return whether this node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi-reg node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
bool GenTree::IsMultiRegNode() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return true;
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return true;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return true;
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return true;
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::IsMultiReg(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetMultiRegCount: Return the register count for a multi-reg node.
//
// Arguments:
// comp - Compiler instance. Required for MultiRegLclVar, unused otherwise.
//
// Return Value:
// Returns the number of registers defined by this node.
//
unsigned GenTree::GetMultiRegCount(Compiler* comp) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegCount();
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegCount();
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::GetMultiRegCount(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
assert(comp != nullptr);
return AsLclVar()->GetFieldCount(comp);
}
assert(!"GetMultiRegCount called with non-multireg node");
return 1;
}
//---------------------------------------------------------------
// gtGetContainedRegMask: Get the reg mask of the node including
// contained nodes (recursive).
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetContainedRegMask()
{
if (!isContained())
{
return gtGetRegMask();
}
regMaskTP mask = 0;
for (GenTree* operand : Operands())
{
mask |= operand->gtGetContainedRegMask();
}
return mask;
}
//---------------------------------------------------------------
// gtGetRegMask: Get the reg mask of the node.
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
if (IsMultiRegCall())
{
resultMask = genRegMask(GetRegNum());
resultMask |= AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
// A multi-reg copy or reload will have valid regs only for those
// positions that need to be copied or reloaded. Hence we need
// to consider only those registers when computing the reg mask.
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = copyOrReload->GetRegNumByIdx(i);
if (reg != REG_NA)
{
resultMask |= genRegMask(reg);
}
}
}
#if FEATURE_ARG_SPLIT
else if (compFeatureArgSplit() && OperIsPutArgSplit())
{
const GenTreePutArgSplit* splitArg = AsPutArgSplit();
const unsigned regCount = splitArg->gtNumRegs;
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = splitArg->GetRegNumByIdx(i);
assert(reg != REG_NA);
resultMask |= genRegMask(reg);
}
}
#endif // FEATURE_ARG_SPLIT
else
{
resultMask = genRegMask(GetRegNum());
}
return resultMask;
}
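//------------------------------------------------------------------------
// GenTreeFieldList::AddField: Append a field use to this list and propagate the
// node's side-effect flags to the list node. The "LIR" variants below append a
// use in the same way but do not update the list's flags.
//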
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
}
void GenTreeFieldList::InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::InsertFieldLIR(
Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
}
//---------------------------------------------------------------
// GetOtherRegMask: Get the reg mask of gtOtherRegs of call node
//
// Arguments:
// None
//
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
if (gtOtherRegs[i] != REG_NA)
{
resultMask |= genRegMask((regNumber)gtOtherRegs[i]);
continue;
}
break;
}
#endif
return resultMask;
}
//-------------------------------------------------------------------------
// IsPure:
// Returns true if this call is pure. For now, this uses the same
// definition of "pure" that is that used by HelperCallProperties: a
// pure call does not read or write any aliased (e.g. heap) memory or
// have other global side effects (e.g. class constructors, finalizers),
// but is allowed to throw an exception.
//
// NOTE: this call currently only returns true if the call target is a
// helper method that is known to be pure. No other analysis is
// performed.
//
// Arguments:
//    compiler - the compiler context.
//
// Returns:
// True if the call is pure; false otherwise.
//
bool GenTreeCall::IsPure(Compiler* compiler) const
{
return (gtCallType == CT_HELPER) &&
compiler->s_helperCallProperties.IsPure(compiler->eeGetHelperNum(gtCallMethHnd));
}
//-------------------------------------------------------------------------
// HasSideEffects:
// Returns true if this call has any side effects. All non-helpers are considered to have side-effects. Only helpers
// that do not mutate the heap, do not run constructors, may not throw, and are either a) pure or b) non-finalizing
// allocation functions are considered side-effect-free.
//
// Arguments:
// compiler - the compiler instance
// ignoreExceptions - when `true`, ignores exception side effects
// ignoreCctors - when `true`, ignores class constructor side effects
//
// Return Value:
// true if this call has any side-effects; false otherwise.
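//
// Notes:
//    For example, a helper call that is pure, cannot throw, does not mutate the heap, and has
//    no class constructor to run is reported as side-effect-free here, which (for instance)
//    may allow the call to be removed when its result is unused.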
bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool ignoreCctors) const
{
// Generally all GT_CALL nodes are considered to have side-effects, but we may have extra information about helper
// calls that can prove them side-effect-free.
if (gtCallType != CT_HELPER)
{
return true;
}
CorInfoHelpFunc helper = compiler->eeGetHelperNum(gtCallMethHnd);
HelperCallProperties& helperProperties = compiler->s_helperCallProperties;
// We definitely care about the side effects if MutatesHeap is true
if (helperProperties.MutatesHeap(helper))
{
return true;
}
// Unless we have been instructed to ignore cctors (CSE, for example, ignores cctors), consider them side effects.
if (!ignoreCctors && helperProperties.MayRunCctor(helper))
{
return true;
}
// If we also care about exceptions then check if the helper can throw
if (!ignoreExceptions && !helperProperties.NoThrow(helper))
{
return true;
}
// If this is not a Pure helper call or an allocator (that will not need to run a finalizer)
// then this call has side effects.
return !helperProperties.IsPure(helper) &&
(!helperProperties.IsAllocator(helper) || ((gtCallMoreFlags & GTF_CALL_M_ALLOC_SIDE_EFFECTS) != 0));
}
//-------------------------------------------------------------------------
// HasNonStandardAddedArgs: Return true if the method has non-standard args added to the call
// argument list during argument morphing (fgMorphArgs), e.g., passed in R10 or R11 on AMD64.
// See also GetNonStandardAddedArgCount().
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// true if there are any such args, false otherwise.
//
bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
{
return GetNonStandardAddedArgCount(compiler) != 0;
}
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
// counted in the argument list prior to morphing.
//
// This function is used to help map the caller and callee arguments during tail call setup.
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// The count of args, as described.
//
// Notes:
// It would be more general to have fgMorphArgs set a bit on the call node when such
// args are added to a call, and a bit on each such arg, and then have this code loop
// over the call args when the special call bit is set, counting the args with the special
// arg bit. This seems pretty heavyweight, though. Instead, this logic needs to be kept
// in sync with fgMorphArgs.
//
int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
{
if (IsUnmanaged() && !compiler->opts.ShouldUsePInvokeHelpers())
{
// R11 = PInvoke cookie param
return 1;
}
else if (IsVirtualStub())
{
// R11 = Virtual stub param
return 1;
}
else if ((gtCallType == CT_INDIRECT) && (gtCallCookie != nullptr))
{
// R10 = PInvoke target param
// R11 = PInvoke cookie param
return 2;
}
return 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
//    compiler - the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
//    Returns true if we treat the call as if it has a retBuf argument.
//    The call may actually have a retBuf argument, or it could be a JIT
//    helper that we are still transforming during the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
// These two Jit Helpers that we handle here by returning true
// aren't actually defined to return a struct, so they don't expect
// their RetBuf to be passed in x8, instead they expect it in x0.
//
bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
{
if (HasRetBufArg())
{
return true;
}
else
{
// If we see a Jit helper call that returns a TYP_STRUCT we will
// transform it as if it has a Return Buffer Argument
//
if (IsHelperCall() && (gtReturnType == TYP_STRUCT))
{
// There are two possible helper calls that use this path:
// CORINFO_HELP_GETFIELDSTRUCT and CORINFO_HELP_UNBOX_NULLABLE
//
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(gtCallMethHnd);
if (helpFunc == CORINFO_HELP_GETFIELDSTRUCT)
{
return true;
}
else if (helpFunc == CORINFO_HELP_UNBOX_NULLABLE)
{
return true;
}
else
{
assert(!"Unexpected JIT helper in TreatAsHasRetBufArg");
}
}
}
return false;
}
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
//
// Arguments:
//    compiler - the compiler instance so that we can call eeFindHelper
//    helper   - the helper identifier (CorInfoHelpFunc value) to check for
//
// Return Value:
// Returns true if this GT_CALL node is a call to the specified helper.
//
bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
{
return IsHelperCall(compiler->eeFindHelper(helper));
}
//------------------------------------------------------------------------
// GenTreeCall::ReplaceCallOperand:
// Replaces a given operand to a call node and updates the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
*useEdge = replacement;
const bool isArgument =
(replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
{
if ((originalOperand->gtFlags & GTF_LATE_ARG) != 0)
{
replacement->gtFlags |= GTF_LATE_ARG;
}
else
{
assert((replacement->gtFlags & GTF_LATE_ARG) == 0);
fgArgTabEntry* fp = Compiler::gtArgEntryByNode(this, replacement);
assert(fp->GetNode() == replacement);
}
}
}
//-------------------------------------------------------------------------
// AreArgsComplete: Determine if this GT_CALL node's arguments have been processed.
//
// Return Value:
// Returns true if fgMorphArgs has processed the arguments.
//
bool GenTreeCall::AreArgsComplete() const
{
if (fgArgInfo == nullptr)
{
return false;
}
if (fgArgInfo->AreArgsComplete())
{
assert((gtCallLateArgs != nullptr) || !fgArgInfo->HasRegArgs());
return true;
}
#if defined(FEATURE_FASTTAILCALL)
// If we have FEATURE_FASTTAILCALL, 'fgCanFastTailCall()' can call 'fgInitArgInfo()', and in that
// scenario it is valid to have 'fgArgInfo' be non-null when 'fgMorphArgs()' first queries this,
// when it hasn't yet morphed the arguments.
#else
assert(gtCallArgs == nullptr);
#endif
return false;
}
//-------------------------------------------------------------------------
// SetLclRetBufArg: Sets the "return buffer" argument use.
//
void GenTreeCall::SetLclRetBufArg(Use* retBufArg)
{
assert(retBufArg->GetNode()->TypeIs(TYP_I_IMPL, TYP_BYREF) && retBufArg->GetNode()->OperIs(GT_ADDR, GT_ASG));
assert(HasRetBufArg());
gtRetBufArg = retBufArg;
}
//--------------------------------------------------------------------------
// Equals: Check if 2 CALL nodes are equal.
//
// Arguments:
// c1 - The first call node
// c2 - The second call node
//
// Return Value:
// true if the 2 CALL nodes have the same type and operands
//
bool GenTreeCall::Equals(GenTreeCall* c1, GenTreeCall* c2)
{
assert(c1->OperGet() == c2->OperGet());
if (c1->TypeGet() != c2->TypeGet())
{
return false;
}
if (c1->gtCallType != c2->gtCallType)
{
return false;
}
if (c1->gtCallType != CT_INDIRECT)
{
if (c1->gtCallMethHnd != c2->gtCallMethHnd)
{
return false;
}
#ifdef FEATURE_READYTORUN
if (c1->gtEntryPoint.addr != c2->gtEntryPoint.addr)
{
return false;
}
#endif
}
else
{
if (!Compare(c1->gtCallAddr, c2->gtCallAddr))
{
return false;
}
}
if ((c1->gtCallThisArg != nullptr) != (c2->gtCallThisArg != nullptr))
{
return false;
}
if ((c1->gtCallThisArg != nullptr) && !Compare(c1->gtCallThisArg->GetNode(), c2->gtCallThisArg->GetNode()))
{
return false;
}
GenTreeCall::UseIterator i1 = c1->Args().begin();
GenTreeCall::UseIterator end1 = c1->Args().end();
GenTreeCall::UseIterator i2 = c2->Args().begin();
GenTreeCall::UseIterator end2 = c2->Args().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
i1 = c1->LateArgs().begin();
end1 = c1->LateArgs().end();
i2 = c2->LateArgs().begin();
end2 = c2->LateArgs().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
if (!Compare(c1->gtControlExpr, c2->gtControlExpr))
{
return false;
}
return true;
}
//--------------------------------------------------------------------------
// ResetArgInfo: The argument info needs to be reset so it can be recomputed based on some change
// in conditions, such as changing the return type of a call due to giving up on doing a tailcall.
// If there is no fgArgInfo computed yet for this call, then there is nothing to reset.
//
void GenTreeCall::ResetArgInfo()
{
if (fgArgInfo == nullptr)
{
return;
}
// We would like to just set `fgArgInfo = nullptr`. But `fgInitArgInfo()` not
// only sets up fgArgInfo, it also adds non-standard args to the IR, and we need
// to remove that extra IR so it doesn't get added again.
//
unsigned argNum = 0;
if (gtCallThisArg != nullptr)
{
argNum++;
}
Use** link = >CallArgs;
while ((*link) != nullptr)
{
const fgArgTabEntry* entry = fgArgInfo->GetArgEntry(argNum);
if (entry->isNonStandard() && entry->isNonStandardArgAddedLate())
{
JITDUMP("Removing non-standarg arg %s [%06u] to prepare for re-morphing call [%06u]\n",
getNonStandardArgKindName(entry->nonStandardArgKind), Compiler::dspTreeID((*link)->GetNode()),
gtTreeID);
*link = (*link)->GetNext();
}
else
{
link = &(*link)->NextRef();
}
argNum++;
}
fgArgInfo = nullptr;
}
#if !defined(FEATURE_PUT_STRUCT_ARG_STK)
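// GetStackByteSize: Get the number of bytes this PUTARG_STK node passes on the stack.
// Without FEATURE_PUT_STRUCT_ARG_STK this is simply the actual-type size of the operand.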
unsigned GenTreePutArgStk::GetStackByteSize() const
{
return genTypeSize(genActualType(gtOp1->gtType));
}
#endif // !defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
*  Returns true if the two trees are identical.
*/
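// For example, two GT_ADD(GT_LCL_VAR V01, GT_CNS_INT 8) trees of the same type compare equal
// (the local number and constant value here are just illustrative); with 'swapOK', the operands
// of a commutative operator may be matched in either order, but only when neither side has any
// side effects.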
bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
{
genTreeOps oper;
unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
if (op1 == nullptr)
{
return (op2 == nullptr);
}
if (op2 == nullptr)
{
return false;
}
if (op1 == op2)
{
return true;
}
oper = op1->OperGet();
/* The operators must be equal */
if (oper != op2->gtOper)
{
return false;
}
/* The types must be equal */
if (op1->gtType != op2->gtType)
{
return false;
}
/* Overflow must be equal */
if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
/* Sensible flags must be equal */
if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
if (op1->OperIsConst())
{
switch (oper)
{
case GT_CNS_INT:
if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
{
return true;
}
break;
case GT_CNS_STR:
if ((op1->AsStrCon()->gtSconCPX == op2->AsStrCon()->gtSconCPX) &&
(op1->AsStrCon()->gtScpHnd == op2->AsStrCon()->gtScpHnd))
{
return true;
}
break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
return true;
break;
case GT_CNS_DBL:
if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal)
return true;
break;
#endif
default:
break;
}
return false;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_LCL_VAR:
if (op1->AsLclVarCommon()->GetLclNum() != op2->AsLclVarCommon()->GetLclNum())
{
break;
}
return true;
case GT_LCL_FLD:
if ((op1->AsLclFld()->GetLclNum() != op2->AsLclFld()->GetLclNum()) ||
(op1->AsLclFld()->GetLclOffs() != op2->AsLclFld()->GetLclOffs()))
{
break;
}
return true;
case GT_CLS_VAR:
if (op1->AsClsVar()->gtClsVarHnd != op2->AsClsVar()->gtClsVarHnd)
{
break;
}
return true;
case GT_LABEL:
return true;
case GT_ARGPLACE:
if ((op1->gtType == TYP_STRUCT) &&
(op1->AsArgPlace()->gtArgPlaceClsHnd != op2->AsArgPlace()->gtArgPlaceClsHnd))
{
break;
}
return true;
default:
break;
}
return false;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
// ExOp operators extend unary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_ARR_LENGTH:
if (op1->AsArrLen()->ArrLenOffset() != op2->AsArrLen()->ArrLenOffset())
{
return false;
}
break;
case GT_CAST:
if (op1->AsCast()->gtCastType != op2->AsCast()->gtCastType)
{
return false;
}
break;
case GT_BLK:
case GT_OBJ:
if (op1->AsBlk()->GetLayout() != op2->AsBlk()->GetLayout())
{
return false;
}
break;
case GT_FIELD:
if (op1->AsField()->gtFldHnd != op2->AsField()->gtFldHnd)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_RUNTIMELOOKUP:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1);
}
if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
// ExOp operators extend binary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_INTRINSIC:
if (op1->AsIntrinsic()->gtIntrinsicName != op2->AsIntrinsic()->gtIntrinsicName)
{
return false;
}
break;
case GT_LEA:
if (op1->AsAddrMode()->gtScale != op2->AsAddrMode()->gtScale)
{
return false;
}
if (op1->AsAddrMode()->Offset() != op2->AsAddrMode()->Offset())
{
return false;
}
break;
case GT_BOUNDS_CHECK:
if (op1->AsBoundsChk()->gtThrowKind != op2->AsBoundsChk()->gtThrowKind)
{
return false;
}
break;
case GT_INDEX:
if (op1->AsIndex()->gtIndElemSize != op2->AsIndex()->gtIndElemSize)
{
return false;
}
break;
case GT_INDEX_ADDR:
if (op1->AsIndexAddr()->gtElemSize != op2->AsIndexAddr()->gtElemSize)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_QMARK:
break;
default:
assert(!"unexpected binary ExOp operator");
}
}
if (op1->AsOp()->gtOp2)
{
if (!Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1, swapOK))
{
if (swapOK && OperIsCommutative(oper) &&
((op1->AsOp()->gtOp1->gtFlags | op1->AsOp()->gtOp2->gtFlags | op2->AsOp()->gtOp1->gtFlags |
op2->AsOp()->gtOp2->gtFlags) &
GTF_ALL_EFFECT) == 0)
{
if (Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp2, swapOK))
{
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
}
return false;
}
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
}
else
{
op1 = op1->AsOp()->gtOp1;
op2 = op2->AsOp()->gtOp1;
if (!op1)
{
return (op2 == nullptr);
}
if (!op2)
{
return false;
}
goto AGAIN;
}
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
return GenTreeCall::Equals(op1->AsCall(), op2->AsCall());
#ifdef FEATURE_SIMD
case GT_SIMD:
return GenTreeSIMD::Equals(op1->AsSIMD(), op2->AsSIMD());
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
return GenTreeHWIntrinsic::Equals(op1->AsHWIntrinsic(), op2->AsHWIntrinsic());
#endif
case GT_ARR_ELEM:
if (op1->AsArrElem()->gtArrRank != op2->AsArrElem()->gtArrRank)
{
return false;
}
// NOTE: gtArrElemSize may need to be handled
unsigned dim;
for (dim = 0; dim < op1->AsArrElem()->gtArrRank; dim++)
{
if (!Compare(op1->AsArrElem()->gtArrInds[dim], op2->AsArrElem()->gtArrInds[dim]))
{
return false;
}
}
op1 = op1->AsArrElem()->gtArrObj;
op2 = op2->AsArrElem()->gtArrObj;
goto AGAIN;
case GT_ARR_OFFSET:
if (op1->AsArrOffs()->gtCurrDim != op2->AsArrOffs()->gtCurrDim ||
op1->AsArrOffs()->gtArrRank != op2->AsArrOffs()->gtArrRank)
{
return false;
}
return (Compare(op1->AsArrOffs()->gtOffset, op2->AsArrOffs()->gtOffset) &&
Compare(op1->AsArrOffs()->gtIndex, op2->AsArrOffs()->gtIndex) &&
Compare(op1->AsArrOffs()->gtArrObj, op2->AsArrOffs()->gtArrObj));
case GT_PHI:
return GenTreePhi::Equals(op1->AsPhi(), op2->AsPhi());
case GT_FIELD_LIST:
return GenTreeFieldList::Equals(op1->AsFieldList(), op2->AsFieldList());
case GT_CMPXCHG:
return Compare(op1->AsCmpXchg()->gtOpLocation, op2->AsCmpXchg()->gtOpLocation) &&
Compare(op1->AsCmpXchg()->gtOpValue, op2->AsCmpXchg()->gtOpValue) &&
Compare(op1->AsCmpXchg()->gtOpComparand, op2->AsCmpXchg()->gtOpComparand);
case GT_STORE_DYN_BLK:
return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
default:
assert(!"unexpected operator");
}
return false;
}
//------------------------------------------------------------------------
// gtHasRef: Find out whether the given tree contains a local/field.
//
// Arguments:
// tree - tree to find the local in
// lclNum - the local's number, *or* the handle for the field
//
// Return Value:
// Whether "tree" has any LCL_VAR/LCL_FLD nodes that refer to the
// local, LHS or RHS, or FIELD nodes with the specified handle.
//
// Notes:
// Does not pay attention to local address nodes.
//
/* static */ bool Compiler::gtHasRef(GenTree* tree, ssize_t lclNum)
{
if (tree == nullptr)
{
return false;
}
if (tree->OperIsLeaf())
{
if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && (tree->AsLclVarCommon()->GetLclNum() == (unsigned)lclNum))
{
return true;
}
if (tree->OperIs(GT_RET_EXPR))
{
return gtHasRef(tree->AsRetExpr()->gtInlineCandidate, lclNum);
}
return false;
}
if (tree->OperIsUnary())
{
// Code in importation (see CEE_STFLD in impImportBlockCode), when
// spilling, can pass us a "lclNum" that is actually a field handle...
if (tree->OperIs(GT_FIELD) && (lclNum == (ssize_t)tree->AsField()->gtFldHnd))
{
return true;
}
return gtHasRef(tree->AsUnOp()->gtGetOp1(), lclNum);
}
if (tree->OperIsBinary())
{
return gtHasRef(tree->AsOp()->gtGetOp1(), lclNum) || gtHasRef(tree->AsOp()->gtGetOp2(), lclNum);
}
bool result = false;
tree->VisitOperands([lclNum, &result](GenTree* operand) -> GenTree::VisitResult {
if (gtHasRef(operand, lclNum))
{
result = true;
return GenTree::VisitResult::Abort;
}
return GenTree::VisitResult::Continue;
});
return result;
}
struct AddrTakenDsc
{
Compiler* comp;
bool hasAddrTakenLcl;
};
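//------------------------------------------------------------------------
// gtHasLocalsWithAddrOpCB: Pre-order tree-walk callback for gtHasLocalsWithAddrOp;
// aborts the walk as soon as a GT_LCL_VAR whose variable has lvHasLdAddrOp or
// IsAddressExposed() set is found.
//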
/* static */
Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
return WALK_ABORT;
}
}
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Return true if this tree contains locals with lvHasLdAddrOp or IsAddressExposed()
* flag(s) set.
*/
bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree)
{
AddrTakenDsc desc;
desc.comp = this;
desc.hasAddrTakenLcl = false;
fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
}
#ifdef DEBUG
/*****************************************************************************
*
* Helper used to compute hash values for trees.
*/
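// Note: the combining step is deliberately order-sensitive (it is not commutative), so, for
// example, the hash of "a - b" will generally differ from the hash of "b - a".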
inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
return (old + old / 2) ^ add;
}
inline unsigned genTreeHashAdd(unsigned old, void* add)
{
return genTreeHashAdd(old, (unsigned)(size_t)add);
}
/*****************************************************************************
*
* Given an arbitrary expression tree, compute a hash value for it.
*/
unsigned Compiler::gtHashValue(GenTree* tree)
{
genTreeOps oper;
unsigned kind;
unsigned hash = 0;
GenTree* temp;
AGAIN:
assert(tree);
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
/* Include the operator value in the hash */
hash = genTreeHashAdd(hash, oper);
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
size_t add;
switch (oper)
{
UINT64 bits;
case GT_LCL_VAR:
add = tree->AsLclVar()->GetLclNum();
break;
case GT_LCL_FLD:
hash = genTreeHashAdd(hash, tree->AsLclFld()->GetLclNum());
add = tree->AsLclFld()->GetLclOffs();
break;
case GT_CNS_INT:
add = tree->AsIntCon()->gtIconVal;
break;
case GT_CNS_LNG:
bits = (UINT64)tree->AsLngCon()->gtLconVal;
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_DBL:
bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal);
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_STR:
add = tree->AsStrCon()->gtSconCPX;
break;
case GT_JMP:
add = tree->AsVal()->gtVal1;
break;
default:
add = 0;
break;
}
// clang-format off
// narrow 'add' into a 32-bit 'val'
unsigned val;
#ifdef HOST_64BIT
val = genTreeHashAdd(uhi32(add), ulo32(add));
#else // 32-bit host
val = add;
#endif
// clang-format on
hash = genTreeHashAdd(hash, val);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
GenTree* op1;
if (kind & GTK_UNOP)
{
op1 = tree->AsOp()->gtOp1;
/* Special case: no sub-operand at all */
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_ARR_LENGTH:
hash += tree->AsArrLen()->ArrLenOffset();
break;
case GT_CAST:
hash ^= tree->AsCast()->gtCastType;
break;
case GT_INDEX:
hash += tree->AsIndex()->gtIndElemSize;
break;
case GT_INDEX_ADDR:
hash += tree->AsIndexAddr()->gtElemSize;
break;
case GT_ALLOCOBJ:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsAllocObj()->gtAllocObjClsHnd)));
hash = genTreeHashAdd(hash, tree->AsAllocObj()->gtNewHelper);
break;
case GT_RUNTIMELOOKUP:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsRuntimeLookup()->gtHnd)));
break;
case GT_BLK:
case GT_OBJ:
hash =
genTreeHashAdd(hash,
static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsBlk()->GetLayout())));
break;
case GT_FIELD:
hash = genTreeHashAdd(hash, tree->AsField()->gtFldHnd);
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
if (!op1)
{
goto DONE;
}
tree = op1;
goto AGAIN;
}
if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_INTRINSIC:
hash += tree->AsIntrinsic()->gtIntrinsicName;
break;
case GT_LEA:
hash += static_cast<unsigned>(tree->AsAddrMode()->Offset() << 3) + tree->AsAddrMode()->gtScale;
break;
case GT_BOUNDS_CHECK:
hash = genTreeHashAdd(hash, tree->AsBoundsChk()->gtThrowKind);
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
hash ^= PtrToUlong(tree->AsBlk()->GetLayout());
break;
// For the ones below no extra argument matters for comparison.
case GT_ARR_INDEX:
case GT_QMARK:
case GT_INDEX:
case GT_INDEX_ADDR:
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->GetSIMDIntrinsicId();
hash += tree->AsSIMD()->GetSimdBaseType();
hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->GetHWIntrinsicId();
hash += tree->AsHWIntrinsic()->GetSimdBaseType();
hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
hash += tree->AsHWIntrinsic()->GetOtherReg();
break;
#endif // FEATURE_HW_INTRINSICS
default:
assert(!"unexpected binary ExOp operator");
}
}
op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
/* Is there a second sub-operand? */
if (!op2)
{
/* Special case: no sub-operands at all */
if (!op1)
{
goto DONE;
}
/* This is a unary operator */
tree = op1;
goto AGAIN;
}
/* This is a binary operator */
unsigned hsh1 = gtHashValue(op1);
/* Add op1's hash to the running value and continue with op2 */
hash = genTreeHashAdd(hash, hsh1);
tree = op2;
goto AGAIN;
}
/* See what kind of a special operator we have here */
switch (tree->gtOper)
{
case GT_ARR_ELEM:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrObj));
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrInds[dim]));
}
break;
case GT_ARR_OFFSET:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtOffset));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtIndex));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtArrObj));
break;
case GT_CALL:
if ((tree->AsCall()->gtCallThisArg != nullptr) && !tree->AsCall()->gtCallThisArg->GetNode()->OperIs(GT_NOP))
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCall()->gtCallThisArg->GetNode()));
}
for (GenTreeCall::Use& use : tree->AsCall()->Args())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
temp = tree->AsCall()->gtCallAddr;
assert(temp);
hash = genTreeHashAdd(hash, gtHashValue(temp));
}
else
{
hash = genTreeHashAdd(hash, tree->AsCall()->gtCallMethHnd);
}
for (GenTreeCall::Use& use : tree->AsCall()->LateArgs())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
// TODO-List: rewrite with a general visitor / iterator?
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
hash = genTreeHashAdd(hash, gtHashValue(operand));
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_CMPXCHG:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpLocation));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpValue));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpComparand));
break;
case GT_STORE_DYN_BLK:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
assert(!"unexpected operator");
break;
}
DONE:
return hash;
}
#endif // DEBUG
/*****************************************************************************
*
* Return a relational operator that is the reverse of the given one.
*/
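// For example, ReverseRelop(GT_LT) is GT_GE: "a < b" negates to "a >= b".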
/* static */
genTreeOps GenTree::ReverseRelop(genTreeOps relop)
{
static const genTreeOps reverseOps[] = {
GT_NE, // GT_EQ
GT_EQ, // GT_NE
GT_GE, // GT_LT
GT_GT, // GT_LE
GT_LT, // GT_GE
GT_LE, // GT_GT
GT_TEST_NE, // GT_TEST_EQ
GT_TEST_EQ, // GT_TEST_NE
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
assert(reverseOps[GT_NE - GT_EQ] == GT_EQ);
assert(reverseOps[GT_LT - GT_EQ] == GT_GE);
assert(reverseOps[GT_LE - GT_EQ] == GT_GT);
assert(reverseOps[GT_GE - GT_EQ] == GT_LT);
assert(reverseOps[GT_GT - GT_EQ] == GT_LE);
assert(reverseOps[GT_TEST_EQ - GT_EQ] == GT_TEST_NE);
assert(reverseOps[GT_TEST_NE - GT_EQ] == GT_TEST_EQ);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(reverseOps));
return reverseOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Return a relational operator that will work for swapped operands.
*/
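// For example, SwapRelop(GT_LT) is GT_GT: "a < b" is equivalent to "b > a" once the operands
// are swapped.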
/* static */
genTreeOps GenTree::SwapRelop(genTreeOps relop)
{
static const genTreeOps swapOps[] = {
GT_EQ, // GT_EQ
GT_NE, // GT_NE
GT_GT, // GT_LT
GT_GE, // GT_LE
GT_LE, // GT_GE
GT_LT, // GT_GT
GT_TEST_EQ, // GT_TEST_EQ
GT_TEST_NE, // GT_TEST_NE
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
assert(swapOps[GT_NE - GT_EQ] == GT_NE);
assert(swapOps[GT_LT - GT_EQ] == GT_GT);
assert(swapOps[GT_LE - GT_EQ] == GT_GE);
assert(swapOps[GT_GE - GT_EQ] == GT_LE);
assert(swapOps[GT_GT - GT_EQ] == GT_LT);
assert(swapOps[GT_TEST_EQ - GT_EQ] == GT_TEST_EQ);
assert(swapOps[GT_TEST_NE - GT_EQ] == GT_TEST_NE);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(swapOps));
return swapOps[relop - GT_EQ];
}
/*****************************************************************************
*
* Reverse the meaning of the given test condition.
*/
GenTree* Compiler::gtReverseCond(GenTree* tree)
{
if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
// Flip the GTF_RELOP_NAN_UN bit
// a ord b === (a != NaN && b != NaN)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
if (varTypeIsFloating(tree->AsOp()->gtOp1->TypeGet()))
{
tree->gtFlags ^= GTF_RELOP_NAN_UN;
}
}
else if (tree->OperIs(GT_JCC, GT_SETCC))
{
GenTreeCC* cc = tree->AsCC();
cc->gtCondition = GenCondition::Reverse(cc->gtCondition);
}
else if (tree->OperIs(GT_JCMP))
{
// Flip the GTF_JCMP_EQ
//
// This causes switching
// cbz <=> cbnz
// tbz <=> tbnz
tree->gtFlags ^= GTF_JCMP_EQ;
}
else
{
tree = gtNewOperNode(GT_NOT, TYP_INT, tree);
}
return tree;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
//------------------------------------------------------------------------------
// IsValidLongMul : Check for long multiplication with 32 bit operands.
//
// Recognizes the following tree: MUL(CAST(long <- int), CAST(long <- int) or CONST),
// where CONST must be an integer constant that fits in 32 bits. Will try to detect
// cases when the multiplication cannot overflow and return "true" for them.
//
// This function does not change the state of the tree and is usable in LIR.
//
// Return Value:
// Whether this GT_MUL tree is a valid long multiplication candidate.
//
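// For example, a tree such as MUL(long; CAST(long <- int), CAST(long <- int)), e.g. what is
// produced for "(long)i * (long)j" with int-typed locals, is a valid candidate, as is one
// whose second operand is an integer constant that fits in 32 bits.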
bool GenTreeOp::IsValidLongMul()
{
assert(OperIs(GT_MUL));
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
if (!TypeIs(TYP_LONG))
{
return false;
}
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
!(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
if (op1->gtOverflow() || op2->gtOverflowEx())
{
return false;
}
if (gtOverflow())
{
auto getMaxValue = [this](GenTree* op) -> int64_t {
if (op->OperIs(GT_CAST))
{
if (op->IsUnsigned())
{
switch (op->AsCast()->CastOp()->TypeGet())
{
case TYP_UBYTE:
return UINT8_MAX;
case TYP_USHORT:
return UINT16_MAX;
default:
return UINT32_MAX;
}
}
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
return op->AsIntConCommon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
int64_t maxOp2 = getMaxValue(op2);
if (CheckedOps::MulOverflows(maxOp1, maxOp2, IsUnsigned()))
{
return false;
}
}
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
return true;
}
#if !defined(TARGET_64BIT) && defined(DEBUG)
//------------------------------------------------------------------------------
// DebugCheckLongMul : Checks that a GTF_MUL_64RSLT tree is a valid MUL_LONG.
//
// Notes:
// This function is defined for 32 bit targets only because we *must* maintain
// the MUL_LONG-compatible tree shape throughout the compilation from morph to
// decomposition, since we do not have (great) ability to create new calls in LIR.
//
// It is for this reason that we recognize MUL_LONGs early in morph, mark them with
// a flag and then pessimize various places (e. g. assertion propagation) to not look
// at them. In contrast, on ARM64 we recognize MUL_LONGs late, in lowering, and thus
// do not need this function.
//
void GenTreeOp::DebugCheckLongMul()
{
assert(OperIs(GT_MUL));
assert(Is64RsltMul());
assert(TypeIs(TYP_LONG));
assert(!gtOverflow());
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
// op1 has to be CAST(long <- int)
assert(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp()));
assert(!op1->gtOverflow());
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
// Do unsigned mul iff both operands are zero-extending.
assert(op1->IsUnsigned() == IsUnsigned());
}
#endif // !defined(TARGET_64BIT) && defined(DEBUG)
#endif // !defined(TARGET_64BIT) || defined(TARGET_ARM64)
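//------------------------------------------------------------------------
// gtSetCallArgsOrder: Set the evaluation order and accumulate costs for a call's argument list.
//
// Arguments:
//    args       - the argument use list (either the early or the late args)
//    lateArgs   - true if 'args' is the late argument list
//    callCostEx - [in, out] the call's accumulated execution cost
//    callCostSz - [in, out] the call's accumulated size cost
//
// Return Value:
//    The maximum "level" (Sethi complexity) among the argument trees.
//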
unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz)
{
unsigned level = 0;
unsigned costEx = 0;
unsigned costSz = 0;
for (GenTreeCall::Use& use : args)
{
GenTree* argNode = use.GetNode();
unsigned argLevel = gtSetEvalOrder(argNode);
if (argLevel > level)
{
level = argLevel;
}
if (argNode->GetCostEx() != 0)
{
costEx += argNode->GetCostEx();
costEx += lateArgs ? 0 : IND_COST_EX;
}
if (argNode->GetCostSz() != 0)
{
costSz += argNode->GetCostSz();
#ifdef TARGET_XARCH
if (lateArgs) // push is smaller than mov to reg
#endif
{
costSz += 1;
}
}
}
*callCostEx += costEx;
*callCostSz += costSz;
return level;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// gtSetMultiOpOrder: Calculate the costs for a MultiOp.
//
// Currently this function just preserves the previous behavior.
// TODO-List-Cleanup: implement proper costing for these trees.
//
// Arguments:
// multiOp - The MultiOp tree in question
//
// Return Value:
// The Sethi "complexity" for this tree (the idealized number of
// registers needed to evaluate it).
//
unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
{
// These default costs preserve previous behavior.
// TODO-CQ: investigate opportunities for tuning them.
int costEx = 1;
int costSz = 1;
unsigned level = 0;
unsigned lvl2 = 0;
#if defined(FEATURE_HW_INTRINSICS)
if (multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
if ((hwTree->GetOperandCount() == 1) && hwTree->OperIsMemoryLoadOrStore())
{
costEx = IND_COST_EX;
costSz = 2;
GenTree* const addrNode = hwTree->Op(1);
level = gtSetEvalOrder(addrNode);
GenTree* const addr = addrNode->gtEffectiveVal();
// See if we can form a complex addressing mode.
if (addr->OperIs(GT_ADD) && gtMarkAddrMode(addr, &costEx, &costSz, hwTree->TypeGet()))
{
// Nothing to do, costs have been set.
}
else
{
costEx += addr->GetCostEx();
costSz += addr->GetCostSz();
}
hwTree->SetCosts(costEx, costSz);
return level;
}
#endif
switch (hwTree->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_Create:
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
case NI_Vector128_Create:
#endif
{
if ((hwTree->GetOperandCount() == 1) && hwTree->Op(1)->OperIsConst())
{
// Vector.Create(cns) is cheap, but not cheap enough to be (1,1)
costEx = IND_COST_EX;
costSz = 2;
level = gtSetEvalOrder(hwTree->Op(1));
hwTree->SetCosts(costEx, costSz);
return level;
}
break;
}
default:
break;
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// This code is here to preserve previous behavior.
switch (multiOp->GetOperandCount())
{
case 0:
// This is a constant HWIntrinsic, we already have correct costs.
break;
case 1:
// A "unary" case.
level = gtSetEvalOrder(multiOp->Op(1));
costEx += multiOp->Op(1)->GetCostEx();
costSz += multiOp->Op(1)->GetCostSz();
break;
case 2:
// A "binary" case.
// This way we have "level" be the complexity of the
// first tree to be evaluated, and "lvl2" the complexity of the second.
if (multiOp->IsReverseOp())
{
level = gtSetEvalOrder(multiOp->Op(2));
lvl2 = gtSetEvalOrder(multiOp->Op(1));
}
else
{
level = gtSetEvalOrder(multiOp->Op(1));
lvl2 = gtSetEvalOrder(multiOp->Op(2));
}
// We want the more complex tree to be evaluated first.
if (level < lvl2)
{
bool canSwap = multiOp->IsReverseOp() ? gtCanSwapOrder(multiOp->Op(2), multiOp->Op(1))
: gtCanSwapOrder(multiOp->Op(1), multiOp->Op(2));
if (canSwap)
{
if (multiOp->IsReverseOp())
{
multiOp->ClearReverseOp();
}
else
{
multiOp->SetReverseOp();
}
std::swap(level, lvl2);
}
}
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
break;
default:
// The former "ArgList" case... we'll be emulating it here.
// The old implementation pushed the nodes on the list, in pre-order.
// Then it popped and costed them in "reverse order", so that's what
// we'll be doing here as well.
unsigned nxtlvl = 0;
for (size_t i = multiOp->GetOperandCount(); i >= 1; i--)
{
GenTree* op = multiOp->Op(i);
unsigned lvl = gtSetEvalOrder(op);
if (lvl < 1)
{
level = nxtlvl;
}
else if (lvl == nxtlvl)
{
level = lvl + 1;
}
else
{
level = lvl;
}
costEx += op->GetCostEx();
costSz += op->GetCostSz();
// Preserving previous behavior...
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_XARCH
if (op->GetCostSz() != 0)
{
costSz += 1;
}
#endif
nxtlvl = level;
}
break;
}
multiOp->SetCosts(costEx, costSz);
return level;
}
#endif
//-----------------------------------------------------------------------------
// gtWalkOp: Traverse and mark an address expression
//
// Arguments:
// op1WB - An out parameter which is either the address expression, or one
// of its operands.
// op2WB - An out parameter which starts as either null or one of the operands
// of the address expression.
// base - The base address of the addressing mode, or null if 'constOnly' is false
// constOnly - True if we will only traverse into ADDs with constant op2.
//
// This routine is a helper routine for gtSetEvalOrder() and is used to identify the
// base and index nodes, which will be validated against those identified by
// genCreateAddrMode().
// It also marks the ADD nodes involved in the address expression with the
// GTF_ADDRMODE_NO_CSE flag which prevents them from being considered for CSE's.
//
// Its two output parameters are modified under the following conditions:
//
// It is called once with the original address expression as 'op1WB', and
// with 'constOnly' set to false. On this first invocation, *op1WB is always
// an ADD node, and it will consider the operands of the ADD even if its op2 is
// not a constant. However, when it encounters a non-constant or the base in the
// op2 position, it stops iterating. That operand is returned in the 'op2WB' out
// parameter, and will be considered on the third invocation of this method if
// it is an ADD.
//
// It is called the second time with the two operands of the original expression, in
// the original order, and the third time in reverse order. For these invocations
// 'constOnly' is true, so it will only traverse cascaded ADD nodes if they have a
// constant op2.
//
// The result, after three invocations, is that the values of the two out parameters
// correspond to the base and index in some fashion. This method doesn't attempt
// to determine or validate the scale or offset, if any.
//
// Assumptions (presumed to be ensured by genCreateAddrMode()):
// If an ADD has a constant operand, it is in the op2 position.
//
// Notes:
// This method, and its invocation sequence, are quite confusing, and since they
// were not originally well-documented, this specification is a possibly-imperfect
// reconstruction.
// The motivation for the handling of the NOP case is unclear.
// Note that 'op2WB' is only modified in the initial (!constOnly) case,
// or if a NOP is encountered in the op1 position.
//
void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* base, bool constOnly)
{
GenTree* op1 = *op1WB;
GenTree* op2 = *op2WB;
op1 = op1->gtEffectiveVal();
// Now we look for op1's with non-overflow GT_ADDs [of constants]
while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->AsOp()->gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (!constOnly)
{
op2 = op1->AsOp()->gtOp2;
}
op1 = op1->AsOp()->gtOp1;
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
{
GenTree* tmp;
tmp = op1;
op1 = op2;
op2 = tmp;
}
if (!constOnly && ((op2 == base) || (!op2->IsCnsIntOrI())))
{
break;
}
op1 = op1->gtEffectiveVal();
}
*op1WB = op1;
*op2WB = op2;
}
#ifdef DEBUG
/*****************************************************************************
* This is a workaround. It is to help implement an assert in gtSetEvalOrder() that the values
* gtWalkOp() leaves in op1 and op2 correspond with the values of adr, idx, mul, and cns
* that are returned by genCreateAddrMode(). It's essentially impossible to determine
* what gtWalkOp() *should* return for all possible trees. This simply loosens one assert
* to handle the following case:
indir int
const(h) int 4 field
+ byref
lclVar byref V00 this <-- op2
comma byref <-- adr (base)
indir byte
lclVar byref V00 this
+ byref
const int 2 <-- mul == 4
<< int <-- op1
lclVar int V01 arg1 <-- idx
* Here, we are planning to generate the address mode [edx+4*eax], where eax = idx and edx = the GT_COMMA expression.
* To check adr equivalence with op2, we need to walk down the GT_ADD tree just like gtWalkOp() does.
*/
GenTree* Compiler::gtWalkOpEffectiveVal(GenTree* op)
{
for (;;)
{
op = op->gtEffectiveVal();
if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->AsOp()->gtOp2->IsCnsIntOrI())
{
break;
}
op = op->AsOp()->gtOp1;
}
return op;
}
#endif // DEBUG
/*****************************************************************************
*
* Given a tree, set the GetCostEx and GetCostSz() fields which
* are used to measure the relative costs of the codegen of the tree
*
*/
void Compiler::gtPrepareCost(GenTree* tree)
{
gtSetEvalOrder(tree);
}
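//------------------------------------------------------------------------
// gtIsLikelyRegVar: Estimate whether a tree is a local var that is likely to be enregistered.
//
// Arguments:
//    tree - the tree to check
//
// Return Value:
//    True if "tree" is a GT_LCL_VAR that is not "do not enregister" and whose weighted ref
//    count (once ref counts are available) makes enregistration likely. This is only a costing
//    heuristic.
//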
bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
{
return false;
}
const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varDsc->lvDoNotEnregister)
{
return false;
}
// If this is an EH-live var, return false if it is a def,
// as it will have to go to memory.
if (varDsc->lvLiveInOutOfHndlr && ((tree->gtFlags & GTF_VAR_DEF) != 0))
{
return false;
}
// Be pessimistic if ref counts are not yet set up.
//
// Perhaps we should be optimistic though.
// See notes in GitHub issue 18969.
if (!lvaLocalVarRefCounted())
{
return false;
}
if (varDsc->lvRefCntWtd() < (BB_UNITY_WEIGHT * 3))
{
return false;
}
#ifdef TARGET_X86
if (varTypeUsesFloatReg(tree->TypeGet()))
return false;
if (varTypeIsLong(tree->TypeGet()))
return false;
#endif
return true;
}
//------------------------------------------------------------------------
// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
//
// Arguments:
// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
// secondNode - The other operand of the tree.
//
// Return Value:
// Returns a boolean indicating whether it is safe to reverse the execution
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
// The relative order of global / side effects can't be swapped.
bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
// When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
// when one or both sides contains a GTF_CALL or GTF_EXCEPT.
// Currently only the C and C++ languages allow non strict side effect order.
unsigned strictEffects = GTF_GLOB_EFFECT;
if (canSwap && (firstNode->gtFlags & strictEffects))
{
// op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
if (secondNode->gtFlags & strictEffects)
{
// op2 also has non-reorderable side effects - can't swap.
canSwap = false;
}
else
{
// No side effects in op2 - we can swap iff op1 has no way of modifying op2,
// i.e. through byref assignments or calls or op2 is a constant.
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
{
// We have to be conservative - can swap iff op2 is constant.
if (!secondNode->IsInvariant())
{
canSwap = false;
}
}
}
}
return canSwap;
}
//------------------------------------------------------------------------
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
//
// Arguments:
// addr - The address expression
// costEx - The execution cost of this address expression (in/out arg to be updated)
// costSz - The size cost of this address expression (in/out arg to be updated)
// type - The type of the value being referenced by the parent of this address expression.
//
// Return Value:
// Returns true if it finds an addressing mode.
//
// Notes:
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
unsigned mul; // This is the index (scale) value for the addressing mode
ssize_t cns; // This is the constant offset
GenTree* base; // This is the base of the address.
GenTree* idx; // This is the index.
if (codeGen->genCreateAddrMode(addr, false /*fold*/, &rev, &base, &idx, &mul, &cns))
{
#ifdef TARGET_ARMARCH
// The multiplier should be a "natural-scale" power of two equal to the width of the accessed type.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - cannot be optimized
// *(int*)(data + index * 2); - cannot be optimized
//
if ((mul > 0) && (genTypeSize(type) != mul))
{
return false;
}
#endif
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
// all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
addrmodeCount++;
}
if (cns)
{
if (((signed char)cns) == ((int)cns))
{
*pCostSz += 1;
}
else
{
*pCostSz += 4;
}
addrmodeCount++;
}
if (mul)
{
addrmodeCount++;
}
// When we form a complex addressing mode we can reduce the costs
// associated with the interior GT_ADD and GT_LSH nodes:
//
// GT_ADD -- reduce this interior GT_ADD by (-3,-3)
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
// 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
// / \ --
// 'idx' 'mul'
//
if (addrmodeCount > 1)
{
// The number of interior GT_ADD and GT_LSL will always be one less than addrmodeCount
//
addrmodeCount--;
GenTree* tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
// addrmodeCount
tmp->SetCosts(tmp->GetCostEx() - addrmodeCount, tmp->GetCostSz() - addrmodeCount);
addrmodeCount--;
if (addrmodeCount > 0)
{
GenTree* tmpOp1 = tmp->AsOp()->gtOp1;
GenTree* tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_LSH)
{
tmp = tmpOp2;
}
else if (tmpOp1->OperGet() == GT_LSH)
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_ADD)
{
tmp = tmpOp2;
}
else
{
// We can very rarely encounter a tree that has a GT_COMMA node
// that is difficult to walk, so we just early out without decrementing.
addrmodeCount = 0;
}
}
}
}
#elif defined TARGET_ARM
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
if ((base->gtOper == GT_LCL_VAR) && ((idx == nullptr) || (cns == 0)))
{
*pCostSz -= 1;
}
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
if (mul > 0)
{
*pCostSz += 2;
}
}
if (cns)
{
if (cns >= 128) // small offsets fit into a 16-bit instruction
{
if (cns < 4096) // medium offsets require a 32-bit instruction
{
if (!varTypeIsFloating(type))
{
*pCostSz += 2;
}
}
else
{
*pCostEx += 2; // Very large offsets require movw/movt instructions
*pCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
}
if (cns != 0)
{
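// Offsets that do not fit the scaled 12-bit immediate addressing form require extra code to materialize the address.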
if (cns >= (4096 * genTypeSize(type)))
{
*pCostEx += 1;
*pCostSz += 4;
}
}
#else
#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
assert(!addr->gtOverflow());
assert(mul != 1);
// If we have an addressing mode, we have one of:
// [base + cns]
// [ idx * mul ] // mul >= 2, else we would use base instead of idx
// [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
// [base + idx * mul ] // mul can be 0, 2, 4, or 8
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
INDEBUG(GenTree* op1Save = addr);
// Walk 'addr' identifying non-overflow ADDs that will be part of the address mode.
// Note that we will be modifying 'op1' and 'op2' so that eventually they should
// map to the base and index.
GenTree* op1 = addr;
GenTree* op2 = nullptr;
gtWalkOp(&op1, &op2, base, false);
// op1 and op2 are now descendants of the root GT_ADD of the addressing mode.
assert(op1 != op1Save);
assert(op2 != nullptr);
#if defined(TARGET_XARCH)
// Walk the operands again (the third operand is unused in this case).
// This time we will only consider adds with constant op2's, since
// we have already found either a non-ADD op1 or a non-constant op2.
// NOTE: we don't support ADD(op1, cns) addressing for ARM/ARM64 yet so
// this walk makes no sense there.
gtWalkOp(&op1, &op2, nullptr, true);
// For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call
// gtWalkOp on both operands of the original GT_ADD.
// This is not done for ARMARCH. Though the stated reason is that we don't try to create a
// scaled index, in fact we actually do create them (even base + index*scale + offset).
// At this point, 'op2' may itself be an ADD of a constant that should be folded
// into the addressing mode.
// Walk op2 looking for non-overflow GT_ADDs of constants.
gtWalkOp(&op2, &op1, nullptr, true);
#endif // defined(TARGET_XARCH)
// OK we are done walking the tree
// Now assert that op1 and op2 correspond with base and idx
// in one of the several acceptable ways.
// Note that sometimes op1/op2 is equal to idx/base
// and other times op1/op2 is a GT_COMMA node with
// an effective value that is idx/base
if (mul > 1)
{
if ((op1 != base) && (op1->gtOper == GT_LSH))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1->AsOp()->gtOp1->gtOper == GT_MUL)
{
op1->AsOp()->gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
assert((base == nullptr) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
(gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
assert(op2 != nullptr);
assert(op2->OperIs(GT_LSH, GT_MUL));
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
// We may have eliminated multiple shifts and multiplies in the addressing mode,
// so navigate down through them to get to "idx".
GenTree* op2op1 = op2->AsOp()->gtOp1;
while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->AsOp()->gtOp1;
}
assert(op1->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
else
{
assert(mul == 0);
if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
{
if (idx != nullptr)
{
if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
{
GenTree* op1op1 = op1->AsOp()->gtOp1;
if ((op1op1->gtOper == GT_NOP) ||
(op1op1->gtOper == GT_MUL && op1op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1op1->gtOper == GT_MUL)
{
op1op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
}
assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
else if ((op1 == base) || (op1->gtEffectiveVal() == base))
{
if (idx != nullptr)
{
assert(op2 != nullptr);
if (op2->OperIs(GT_MUL, GT_LSH))
{
GenTree* op2op1 = op2->AsOp()->gtOp1;
if ((op2op1->gtOper == GT_NOP) ||
(op2op1->gtOper == GT_MUL && op2op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op2op1->gtOper == GT_MUL)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
}
else
{
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
return true;
} // end if (genCreateAddrMode(...))
return false;
}
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
* evaluated. If the second operand of a binary operator is more expensive
* than the first operand, then try to swap the operand trees. Updates the
* GTF_REVERSE_OPS bit if necessary in this case.
*
* Returns the Sethi 'complexity' estimate for this tree (the higher
* the number, the higher is the tree's resources requirement).
*
* This function sets:
* 1. GetCostEx() to the execution complexity estimate
* 2. GetCostSz() to the code size estimate
* 3. Sometimes sets GTF_ADDRMODE_NO_CSE on nodes in the tree.
* 4. DEBUG-only: clears GTF_DEBUG_NODE_MORPHED.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
/* Is this a FP value? */
bool isflt = varTypeIsFloating(tree->TypeGet());
/* Figure out what kind of a node we have */
const genTreeOps oper = tree->OperGet();
const unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
unsigned level;
int costEx;
int costSz;
#ifdef DEBUG
costEx = -1;
costSz = -1;
#endif
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
#ifdef TARGET_ARM
case GT_CNS_STR:
// Uses movw/movt
costSz = 8;
costEx = 2;
goto COMMON_CNS;
case GT_CNS_LNG:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
INT64 lngVal = con->LngValue();
INT32 loVal = (INT32)(lngVal & 0xffffffff);
INT32 hiVal = (INT32)(lngVal >> 32);
if (lngVal == 0)
{
costSz = 1;
costEx = 1;
}
else
{
// Minimum of one instruction to setup hiVal,
// and one instruction to setup loVal
costSz = 4 + 4;
costEx = 1 + 1;
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
}
goto COMMON_CNS;
}
case GT_CNS_INT:
{
// If the constant is a handle then it will need to have a relocation
// applied to it.
// Any constant that requires a reloc must use the movw/movt sequence
//
GenTreeIntConCommon* con = tree->AsIntConCommon();
target_ssize_t conVal = (target_ssize_t)con->IconValue();
if (con->ImmedValNeedsReloc(this))
{
// Requires movw/movt
costSz = 8;
costEx = 2;
}
else if (codeGen->validImmForInstr(INS_add, conVal))
{
// Typically included with parent oper
costSz = 2;
costEx = 1;
}
else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
{
// Uses mov or mvn
costSz = 4;
costEx = 1;
}
else
{
// Needs movw/movt
costSz = 8;
costEx = 2;
}
goto COMMON_CNS;
}
#elif defined TARGET_XARCH
case GT_CNS_STR:
#ifdef TARGET_AMD64
costSz = 10;
costEx = 2;
#else // TARGET_X86
costSz = 4;
costEx = 1;
#endif
goto COMMON_CNS;
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
bool fitsInVal = true;
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
INT64 lngVal = con->LngValue();
conVal = (ssize_t)lngVal; // truncate to 32-bits
fitsInVal = ((INT64)conVal == lngVal);
}
#endif // TARGET_X86
// If the constant is a handle then it will need to have a relocation
// applied to it.
//
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
if (iconNeedsReloc)
{
costSz = 4;
costEx = 1;
}
else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
else if (!GenTreeIntConCommon::FitsInI32(conVal))
{
costSz = 10;
costEx = 2;
}
#endif // TARGET_AMD64
else
{
costSz = 4;
costEx = 1;
}
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
}
#endif // TARGET_X86
goto COMMON_CNS;
}
#elif defined(TARGET_ARM64)
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
INT64 imm = con->LngValue();
emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
costSz = 8;
costEx = 2;
}
else if (emitter::emitIns_valid_imm_for_add(imm, size))
{
costSz = 2;
costEx = 1;
}
else if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
costSz = 4;
costEx = 1;
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword
// There are three forms
// movk which loads into any halfword preserving the remaining halfwords
// movz which loads into any halfword zeroing the remaining halfwords
// movn which loads into any halfword zeroing the remaining halfwords then bitwise inverting
// the register
// In some cases it is preferable to use movn, because it has the side effect of filling the
// other halfwords with ones
// Determine whether movn or movz will require the fewest instructions to populate the immediate
bool preferMovz = false;
bool preferMovn = false;
int instructionCount = 4;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (!preferMovn && (uint16_t(imm >> i) == 0x0000))
{
preferMovz = true; // by starting with a movz we can save one instruction
instructionCount--;
}
else if (!preferMovz && (uint16_t(imm >> i) == 0xffff))
{
preferMovn = true; // by using a movn to start we can save one instruction
instructionCount--;
}
}
costEx = instructionCount;
costSz = 4 * instructionCount;
}
}
goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
/*
Note that some code below depends on constants always getting
moved to be the second operand of a binary operator. This is
easily accomplished by giving constants a level of 0, which
we do on the next line. If you ever decide to change this, be
aware that unless you make other arrangements for integer
constants to be moved, stuff will break.
*/
level = 0;
break;
case GT_CNS_DBL:
{
level = 0;
#if defined(TARGET_XARCH)
/* We use fldz and fld1 to load 0.0 and 1.0, but all other */
/* floating point constants are loaded using an indirection */
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
(*((__int64*)&(tree->AsDblCon()->gtDconVal)) == I64(0x3ff0000000000000)))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#elif defined(TARGET_ARM)
var_types targetType = tree->TypeGet();
if (targetType == TYP_FLOAT)
{
costEx = 1 + 2;
costSz = 2 + 4;
}
else
{
assert(targetType == TYP_DOUBLE);
costEx = 1 + 4;
costSz = 2 + 8;
}
#elif defined(TARGET_ARM64)
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#else
#error "Unknown TARGET"
#endif
}
break;
case GT_LCL_VAR:
level = 1;
if (gtIsLikelyRegVar(tree))
{
costEx = 1;
costSz = 1;
/* Sign-extend and zero-extend are more expensive to load */
if (lvaTable[tree->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
costEx += 1;
costSz += 1;
}
}
else
{
costEx = IND_COST_EX;
costSz = 2;
/* Sign-extend and zero-extend are more expensive to load */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
}
#if defined(TARGET_AMD64)
// increase costSz for floating point locals
if (isflt)
{
costSz += 1;
if (!gtIsLikelyRegVar(tree))
{
costSz += 1;
}
}
#endif
break;
case GT_CLS_VAR:
#ifdef TARGET_ARM
// We generate movw/movt/ldr
level = 1;
costEx = 3 + IND_COST_EX; // 6
costSz = 4 + 4 + 2; // 10
break;
#endif
case GT_LCL_FLD:
level = 1;
costEx = IND_COST_EX;
costSz = 4;
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
level = 1;
costEx = 3;
costSz = 3;
break;
case GT_PHI_ARG:
case GT_ARGPLACE:
level = 0;
costEx = 0;
costSz = 0;
break;
default:
level = 1;
costEx = 1;
costSz = 1;
break;
}
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
int lvlb; // preference for op2
unsigned lvl2; // scratch variable
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
costEx = 0;
costSz = 0;
if (tree->OperIsAddrMode())
{
if (op1 == nullptr)
{
op1 = op2;
op2 = nullptr;
}
}
/* Check for a nilary operator */
if (op1 == nullptr)
{
assert(op2 == nullptr);
level = 0;
goto DONE;
}
/* Is this a unary operator? */
if (op2 == nullptr)
{
/* Process the operand of the operator */
/* Most Unary ops have costEx of 1 */
costEx = 1;
costSz = 1;
level = gtSetEvalOrder(op1);
GenTreeIntrinsic* intrinsic;
/* Special handling for some operators */
switch (oper)
{
case GT_JTRUE:
costEx = 2;
costSz = 2;
break;
case GT_SWITCH:
costEx = 10;
costSz = 5;
break;
case GT_CAST:
#if defined(TARGET_ARM)
costEx = 1;
costSz = 1;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 3;
costSz = 4;
}
#elif defined(TARGET_ARM64)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 2;
costSz = 4;
}
#elif defined(TARGET_XARCH)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
/* casts involving floats always go through memory */
costEx = IND_COST_EX * 2;
costSz = 6;
}
#else
#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
if (tree->gtOverflow())
{
costEx += 6;
costSz += 6;
}
break;
case GT_NOP:
costEx = 0;
costSz = 0;
break;
case GT_INTRINSIC:
intrinsic = tree->AsIntrinsic();
// named intrinsic
assert(intrinsic->gtIntrinsicName != NI_Illegal);
// GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
// TODO: tune these costs target specific as some of these are
// target intrinsics and would cost less to generate code.
switch (intrinsic->gtIntrinsicName)
{
default:
assert(!"missing case for gtIntrinsicName");
costEx = 12;
costSz = 12;
break;
case NI_System_Math_Abs:
costEx = 5;
costSz = 15;
break;
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls, since we never CSE user calls. We don't do this for target intrinsics,
// however, as they typically represent single-instruction calls.
if (IsIntrinsicImplementedByUserCall(intrinsic->gtIntrinsicName))
{
costEx = 36;
costSz = 4;
}
else
{
costEx = 3;
costSz = 4;
}
break;
}
case NI_System_Object_GetType:
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls, since we never CSE user calls.
costEx = 36;
costSz = 4;
break;
}
level++;
break;
case GT_NOT:
case GT_NEG:
// We need to ensure that -x is evaluated before x or else
// we get burned while adjusting genFPstkLevel in x*-x where
// the rhs x is the last use of the enregistered x.
//
// Even in the integer case we want to prefer to
// evaluate the side without the GT_NEG node, all other things
// being equal. Also a GT_NOT requires a scratch register
level++;
break;
case GT_ADDR:
costEx = 0;
costSz = 1;
// If we have a GT_ADDR of an GT_IND we can just copy the costs from indOp1
if (op1->OperGet() == GT_IND)
{
GenTree* indOp1 = op1->AsOp()->gtOp1;
costEx = indOp1->GetCostEx();
costSz = indOp1->GetCostSz();
}
break;
case GT_ARR_LENGTH:
level++;
/* Array Len should cost the same as an indirection, which has a costEx of IND_COST_EX */
costEx = IND_COST_EX - 1;
costSz = 2;
break;
case GT_MKREFANY:
case GT_OBJ:
// We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BOX:
// We estimate the cost of a GT_BOX to be two stores (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BLK:
case GT_IND:
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
if (level == 0)
{
level = 1;
}
/* Indirections have a costEx of IND_COST_EX */
costEx = IND_COST_EX;
costSz = 2;
/* If we have to sign-extend or zero-extend, bump the cost */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
if (isflt)
{
if (tree->TypeGet() == TYP_DOUBLE)
{
costEx += 1;
}
#ifdef TARGET_ARM
costSz += 2;
#endif // TARGET_ARM
}
// Can we form an addressing mode with this indirection?
// TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
// addressing modes hidden under a comma node.
if (op1->gtOper == GT_ADD)
{
// See if we can form a complex addressing mode.
GenTree* addr = op1->gtEffectiveVal();
bool doAddrMode = true;
// Always use an addrMode for an array index indirection.
// TODO-1stClassStructs: Always do this, but first make sure it's
// done in Lowering as well.
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
else if (varTypeIsStruct(tree))
{
// This is a heuristic attempting to match prior behavior when indirections
// under a struct assignment would not be considered for addressing modes.
if (compCurStmt != nullptr)
{
GenTree* expr = compCurStmt->GetRootNode();
if ((expr->OperGet() == GT_ASG) &&
((expr->gtGetOp1() == tree) || (expr->gtGetOp2() == tree)))
{
doAddrMode = false;
}
}
}
}
#ifdef TARGET_ARM64
if (tree->gtFlags & GTF_IND_VOLATILE)
{
// For volatile store/loads when address is contained we always emit `dmb`
// if it's not - we emit one-way barriers i.e. ldar/stlr
doAddrMode = false;
}
#endif // TARGET_ARM64
if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
} // end if (op1->gtOper == GT_ADD)
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#ifdef TARGET_XARCH
else if (op1->IsCnsIntOrI())
{
// Indirection of a CNS_INT, subtract 1 from costEx
// makes costEx 3 for x86 and 4 for amd64
//
costEx += (op1->GetCostEx() - 1);
costSz += op1->GetCostSz();
goto DONE;
}
#endif
break;
default:
break;
}
costEx += op1->GetCostEx();
costSz += op1->GetCostSz();
goto DONE;
}
/* Binary operator - check for certain special cases */
lvlb = 0;
/* Default Binary ops have a cost of 1,1 */
costEx = 1;
costSz = 1;
#ifdef TARGET_ARM
if (isflt)
{
costSz += 2;
}
#endif
#ifndef TARGET_64BIT
if (varTypeIsLong(op1->TypeGet()))
{
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
}
#endif
switch (oper)
{
case GT_MOD:
case GT_UMOD:
/* Modulo by a power of 2 is easy */
if (op2->IsCnsIntOrI())
{
size_t ival = op2->AsIntConCommon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
break;
}
}
FALLTHROUGH;
case GT_DIV:
case GT_UDIV:
if (isflt)
{
/* fp division is very expensive to execute */
costEx = 36; // TYP_DOUBLE
costSz += 3;
}
else
{
/* integer division is also very expensive */
costEx = 20;
costSz += 2;
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 3;
}
break;
case GT_MUL:
if (isflt)
{
/* FP multiplication instructions are more expensive */
costEx += 4;
costSz += 3;
}
else
{
/* Integer multiplication instructions are more expensive */
costEx += 3;
costSz += 2;
if (tree->gtOverflow())
{
/* Overflow checks are more expensive */
costEx += 3;
costSz += 3;
}
#ifdef TARGET_X86
if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
{
/* We use imulEAX for TYP_LONG and overflow multiplications */
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 4;
/* The 64-bit imul instruction costs more */
costEx += 4;
}
#endif // TARGET_X86
}
break;
case GT_ADD:
case GT_SUB:
if (isflt)
{
/* FP instructions are a bit more expensive */
costEx += 4;
costSz += 3;
break;
}
/* Overflow checks are more expensive */
if (tree->gtOverflow())
{
costEx += 3;
costSz += 3;
}
break;
case GT_BOUNDS_CHECK:
costEx = 4; // cmp reg,reg and jae throw (not taken)
costSz = 7; // jump to cold section
break;
case GT_COMMA:
/* Comma tosses the result of the left operand */
gtSetEvalOrder(op1);
level = gtSetEvalOrder(op2);
/* GT_COMMA cost is the sum of op1 and op2 costs */
costEx = (op1->GetCostEx() + op2->GetCostEx());
costSz = (op1->GetCostSz() + op2->GetCostSz());
goto DONE;
case GT_COLON:
level = gtSetEvalOrder(op1);
lvl2 = gtSetEvalOrder(op2);
if (level < lvl2)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx = op1->GetCostEx() + op2->GetCostEx();
costSz = op1->GetCostSz() + op2->GetCostSz();
goto DONE;
case GT_INDEX_ADDR:
costEx = 6; // cmp reg,reg; jae throw; mov reg, [addrmode] (not taken)
costSz = 9; // jump to cold section
break;
case GT_ASG:
/* Assignments need a bit of special handling */
/* Process the target */
level = gtSetEvalOrder(op1);
if (gtIsLikelyRegVar(op1))
{
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
/* Assignment to an enregistered LCL_VAR */
costEx = op2->GetCostEx();
costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
goto DONE_OP1;
default:
break;
}
/* Process the sub-operands */
level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
level -= lvlb; // lvlb is negative, so this increases level
lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
lvl2 = gtSetEvalOrder(op2) + lvlb;
costEx += (op1->GetCostEx() + op2->GetCostEx());
costSz += (op1->GetCostSz() + op2->GetCostSz());
DONE_OP1_AFTER_COST:
bool bReverseInAssignment = false;
if (oper == GT_ASG && (!optValnumCSE_phase || optCSE_canSwap(op1, op2)))
{
GenTree* op1Val = op1;
// Skip over the GT_IND/GT_ADDR tree (if one exists)
//
if ((op1->gtOper == GT_IND) && (op1->AsOp()->gtOp1->gtOper == GT_ADDR))
{
op1Val = op1->AsOp()->gtOp1->AsOp()->gtOp1;
}
switch (op1Val->gtOper)
{
case GT_IND:
case GT_BLK:
case GT_OBJ:
{
// In an ASG(IND(addr), ...), the "IND" is a pure syntactical element,
// the actual indirection will only be realized at the point of the ASG
// itself. As such, we can discard any side effects "induced" by it in
// this logic.
//
// Note that for local "addr"s, liveness depends on seeing the defs and
// uses in correct order, and so we MUST reverse the ASG in that case.
//
GenTree* op1Addr = op1->AsIndir()->Addr();
if (op1Addr->IsLocalAddrExpr() || op1Addr->IsInvariant())
{
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
}
if (op1Addr->gtFlags & GTF_ALL_EFFECT)
{
break;
}
// In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
if (op2->gtFlags & GTF_ASG)
{
break;
}
// If op2 is simple then evaluate op1 first
if (op2->OperKind() & GTK_LEAF)
{
break;
}
}
// fall through and set GTF_REVERSE_OPS
FALLTHROUGH;
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_CLS_VAR:
// We evaluate op2 before op1
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
default:
break;
}
}
else if (GenTree::OperIsCompare(oper))
{
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
if (varTypeIsFloating(op1->TypeGet()))
{
level++;
lvl2++;
}
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
/* Using a setcc instruction is more expensive */
costEx += 3;
}
}
/* Check for other interesting cases */
switch (oper)
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
/* Variable sized shifts are more expensive and use REG_SHIFT */
if (!op2->IsCnsIntOrI())
{
costEx += 3;
#ifndef TARGET_64BIT
// Variable sized LONG shifts require the use of a helper call
//
if (tree->gtType == TYP_LONG)
{
level += 5;
lvl2 += 5;
costEx += 3 * IND_COST_EX;
costSz += 4;
}
#endif // !TARGET_64BIT
}
break;
case GT_INTRINSIC:
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Atan2:
case NI_System_Math_Pow:
// These math intrinsics are actually implemented by user calls.
// Increase the Sethi 'complexity' by two to reflect the argument
// register requirement.
level += 2;
break;
case NI_System_Math_Max:
case NI_System_Math_Min:
level++;
break;
default:
assert(!"Unknown binary GT_INTRINSIC operator");
break;
}
break;
default:
break;
}
/* We need to evaluate constants later as many places in codegen
can't handle op1 being a constant. This is normally naturally
enforced as constants have the lowest level of 0. However,
sometimes we end up with a tree like "cns1 < nop(cns2)". In
such cases, both sides have a level of 0. So encourage constants
to be evaluated last in such cases */
if ((level == 0) && (level == lvl2) && op1->OperIsConst() &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
bool tryToSwap;
GenTree* opA;
GenTree* opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
opA = op2;
opB = op1;
}
else
{
opA = op1;
opB = op2;
}
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
tryToSwap = false;
}
else if (bReverseInAssignment)
{
// Assignments are special: we want the GTF_REVERSE_OPS flag,
// so if appropriate it was already set above.
tryToSwap = false;
}
else if ((oper == GT_INTRINSIC) && IsIntrinsicImplementedByUserCall(tree->AsIntrinsic()->gtIntrinsicName))
{
// We do not swap operand execution order for intrinsics that are implemented by user calls
// because of trickiness around ensuring the execution order does not change during rationalization.
tryToSwap = false;
}
else if (oper == GT_BOUNDS_CHECK)
{
// Bounds check nodes used to not be binary, thus GTF_REVERSE_OPS was
// not enabled for them. This condition preserves that behavior.
// Additionally, CQ analysis shows that enabling GTF_REVERSE_OPS
// for these nodes leads to mixed results at best.
tryToSwap = false;
}
else
{
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tryToSwap = (level > lvl2);
}
else
{
tryToSwap = (level < lvl2);
}
// Try to force extra swapping when in the stress mode:
if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
!op2->OperIsConst())
{
tryToSwap = true;
}
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (GenTree::SwapRelop(oper) != oper)
{
tree->SetOper(GenTree::SwapRelop(oper), GenTree::PRESERVE_VN);
}
FALLTHROUGH;
case GT_ADD:
case GT_MUL:
case GT_OR:
case GT_XOR:
case GT_AND:
/* Swap the operands */
tree->AsOp()->gtOp1 = op2;
tree->AsOp()->gtOp2 = op1;
break;
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
break;
default:
/* Mark the operand's evaluation order to be swapped */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
else
{
tree->gtFlags |= GTF_REVERSE_OPS;
}
break;
}
}
}
/* Swap the level counts */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
unsigned tmpl;
tmpl = level;
level = lvl2;
lvl2 = tmpl;
}
/* Compute the sethi number for this binary operator */
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
unsigned lvl2; // Scratch variable
case GT_CALL:
assert(tree->gtFlags & GTF_CALL);
level = 0;
costEx = 5;
costSz = 2;
GenTreeCall* call;
call = tree->AsCall();
/* Evaluate the 'this' argument, if present */
if (tree->AsCall()->gtCallThisArg != nullptr)
{
GenTree* thisVal = tree->AsCall()->gtCallThisArg->GetNode();
lvl2 = gtSetEvalOrder(thisVal);
if (level < lvl2)
{
level = lvl2;
}
costEx += thisVal->GetCostEx();
costSz += thisVal->GetCostSz() + 1;
}
/* Evaluate the arguments, right to left */
if (call->gtCallArgs != nullptr)
{
const bool lateArgs = false;
lvl2 = gtSetCallArgsOrder(call->Args(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
/* Evaluate the temp register arguments list
* This is a "hidden" list and its only purpose is to
* extend the life of temps until we make the call */
if (call->gtCallLateArgs != nullptr)
{
const bool lateArgs = true;
lvl2 = gtSetCallArgsOrder(call->LateArgs(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
lvl2 = gtSetEvalOrder(indirect);
if (level < lvl2)
{
level = lvl2;
}
costEx += indirect->GetCostEx() + IND_COST_EX;
costSz += indirect->GetCostSz();
}
else
{
if (call->IsVirtual())
{
GenTree* controlExpr = call->gtControlExpr;
if (controlExpr != nullptr)
{
lvl2 = gtSetEvalOrder(controlExpr);
if (level < lvl2)
{
level = lvl2;
}
costEx += controlExpr->GetCostEx();
costSz += controlExpr->GetCostSz();
}
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
// We generate movw/movt/ldr
costEx += (1 + IND_COST_EX);
costSz += 8;
if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
{
// Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
costSz += 2;
}
}
else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
costEx += 2;
costSz += 6;
}
costSz += 2;
#endif
#ifdef TARGET_XARCH
costSz += 3;
#endif
}
level += 1;
/* Virtual calls are a bit more expensive */
if (call->IsVirtual())
{
costEx += 2 * IND_COST_EX;
costSz += 2;
}
level += 5;
costEx += 3 * IND_COST_EX;
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
return gtSetMultiOpOrder(tree->AsMultiOp());
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
level = gtSetEvalOrder(arrElem->gtArrObj);
costEx = arrElem->gtArrObj->GetCostEx();
costSz = arrElem->gtArrObj->GetCostSz();
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
lvl2 = gtSetEvalOrder(arrElem->gtArrInds[dim]);
if (level < lvl2)
{
level = lvl2;
}
costEx += arrElem->gtArrInds[dim]->GetCostEx();
costSz += arrElem->gtArrInds[dim]->GetCostSz();
}
level += arrElem->gtArrRank;
costEx += 2 + (arrElem->gtArrRank * (IND_COST_EX + 1));
costSz += 2 + (arrElem->gtArrRank * 2);
}
break;
case GT_ARR_OFFSET:
level = gtSetEvalOrder(tree->AsArrOffs()->gtOffset);
costEx = tree->AsArrOffs()->gtOffset->GetCostEx();
costSz = tree->AsArrOffs()->gtOffset->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtIndex);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtIndex->GetCostEx();
costSz += tree->AsArrOffs()->gtIndex->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtArrObj);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtArrObj->GetCostEx();
costSz += tree->AsArrOffs()->gtArrObj->GetCostSz();
break;
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
lvl2 = gtSetEvalOrder(use.GetNode());
// PHI args should always have cost 0 and level 0
assert(lvl2 == 0);
assert(use.GetNode()->GetCostEx() == 0);
assert(use.GetNode()->GetCostSz() == 0);
}
// Give it a level of 2, just to be sure that it's greater than the LHS of
// the parent assignment and the PHI gets evaluated first in linear order.
// See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg.
level = 2;
costEx = 0;
costSz = 0;
break;
case GT_FIELD_LIST:
level = 0;
costEx = 0;
costSz = 0;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
unsigned opLevel = gtSetEvalOrder(use.GetNode());
level = max(level, opLevel);
gtSetEvalOrder(use.GetNode());
costEx += use.GetNode()->GetCostEx();
costSz += use.GetNode()->GetCostSz();
}
break;
case GT_CMPXCHG:
level = gtSetEvalOrder(tree->AsCmpXchg()->gtOpLocation);
costSz = tree->AsCmpXchg()->gtOpLocation->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpValue);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpValue->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpComparand);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpComparand->GetCostSz();
costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
costSz += 5; // size of lock cmpxchg [reg+C], reg
break;
case GT_STORE_DYN_BLK:
level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
break;
default:
JITDUMP("unexpected operator in this tree:\n");
DISPTREE(tree);
NO_WAY("unexpected operator");
}
DONE:
// Some path through this function must have set the costs.
assert(costEx != -1);
assert(costSz != -1);
tree->SetCosts(costEx, costSz);
return level;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
#ifdef DEBUG
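//------------------------------------------------------------------------
// OperSupportsReverseOpEvalOrder: Check whether this node may have
//    GTF_REVERSE_OPS set on it (DEBUG only).
//
// Arguments:
//    comp - the compiler instance
//
// Return Value:
//    True if this is a two-operand node whose operand evaluation order
//    may be swapped.
//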
bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
{
if (OperIsBinary())
{
if ((AsOp()->gtGetOp1() == nullptr) || (AsOp()->gtGetOp2() == nullptr))
{
return false;
}
if (OperIs(GT_COMMA, GT_BOUNDS_CHECK))
{
return false;
}
if (OperIs(GT_INTRINSIC))
{
return !comp->IsIntrinsicImplementedByUserCall(AsIntrinsic()->gtIntrinsicName);
}
return true;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
if (OperIsMultiOp())
{
return AsMultiOp()->GetOperandCount() == 2;
}
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
return false;
}
#endif // DEBUG
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0. Note that we never return 1,
* to match the behavior of GetScaleIndexShf().
*/
unsigned GenTree::GetScaleIndexMul()
{
if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
{
return (unsigned)AsIntConCommon()->IconValue();
}
return 0;
}
/*****************************************************************************
*
* If the given tree is the right-hand side of a left shift (that is,
* 'y' in the tree 'x' << 'y'), and it is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
unsigned GenTree::GetScaleIndexShf()
{
if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
{
return (unsigned)(1 << AsIntConCommon()->IconValue());
}
return 0;
}
/*****************************************************************************
*
* If the given tree is a scaled index (i.e. "op * 4" or "op << 2"), returns
* the multiplier: 2, 4, or 8; otherwise returns 0. Note that "1" is never
* returned.
*/
unsigned GenTree::GetScaledIndex()
{
// with (!opts.OptEnabled(CLFLG_CONSTANTFOLD)) we can have
// CNS_INT * CNS_INT
//
if (AsOp()->gtOp1->IsCnsIntOrI())
{
return 0;
}
switch (gtOper)
{
case GT_MUL:
return AsOp()->gtOp2->GetScaleIndexMul();
case GT_LSH:
return AsOp()->gtOp2->GetScaleIndexShf();
default:
assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
break;
}
return 0;
}
//------------------------------------------------------------------------
// TryGetUse: Get the use edge for an operand of this tree.
//
// Arguments:
// operand - the node to find the use for
// pUse - [out] parameter for the use
//
// Return Value:
// Whether "operand" is a child of this node. If it is, "*pUse" is set,
// allowing for the replacement of "operand" with some other node.
//
bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
switch (OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
return false;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_RETFILT:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
// Variadic nodes
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
if (this->AsUnOp()->gtOp1->gtOper == GT_FIELD_LIST)
{
return this->AsUnOp()->gtOp1->TryGetUse(operand, pUse);
}
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
#endif // FEATURE_ARG_SPLIT
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
for (GenTree** opUse : this->AsMultiOp()->UseEdges())
{
if (*opUse == operand)
{
*pUse = opUse;
return true;
}
}
return false;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& phiUse : AsPhi()->Uses())
{
if (phiUse.GetNode() == operand)
{
*pUse = &phiUse.NodeRef();
return true;
}
}
return false;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& fieldUse : AsFieldList()->Uses())
{
if (fieldUse.GetNode() == operand)
{
*pUse = &fieldUse.NodeRef();
return true;
}
}
return false;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg();
if (operand == cmpXchg->gtOpLocation)
{
*pUse = &cmpXchg->gtOpLocation;
return true;
}
if (operand == cmpXchg->gtOpValue)
{
*pUse = &cmpXchg->gtOpValue;
return true;
}
if (operand == cmpXchg->gtOpComparand)
{
*pUse = &cmpXchg->gtOpComparand;
return true;
}
return false;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = this->AsArrElem();
if (operand == arrElem->gtArrObj)
{
*pUse = &arrElem->gtArrObj;
return true;
}
for (unsigned i = 0; i < arrElem->gtArrRank; i++)
{
if (operand == arrElem->gtArrInds[i])
{
*pUse = &arrElem->gtArrInds[i];
return true;
}
}
return false;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = this->AsArrOffs();
if (operand == arrOffs->gtOffset)
{
*pUse = &arrOffs->gtOffset;
return true;
}
if (operand == arrOffs->gtIndex)
{
*pUse = &arrOffs->gtIndex;
return true;
}
if (operand == arrOffs->gtArrObj)
{
*pUse = &arrOffs->gtArrObj;
return true;
}
return false;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
if (operand == dynBlock->gtOp1)
{
*pUse = &dynBlock->gtOp1;
return true;
}
if (operand == dynBlock->gtOp2)
{
*pUse = &dynBlock->gtOp2;
return true;
}
if (operand == dynBlock->gtDynamicSize)
{
*pUse = &dynBlock->gtDynamicSize;
return true;
}
return false;
}
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
*pUse = &call->gtCallThisArg->NodeRef();
return true;
}
if (operand == call->gtControlExpr)
{
*pUse = &call->gtControlExpr;
return true;
}
if (call->gtCallType == CT_INDIRECT)
{
if (operand == call->gtCallCookie)
{
*pUse = &call->gtCallCookie;
return true;
}
if (operand == call->gtCallAddr)
{
*pUse = &call->gtCallAddr;
return true;
}
}
for (GenTreeCall::Use& argUse : call->Args())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
for (GenTreeCall::Use& argUse : call->LateArgs())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
return false;
}
// Binary nodes
default:
assert(this->OperIsBinary());
return TryGetUseBinOp(operand, pUse);
}
}
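//------------------------------------------------------------------------
// TryGetUseBinOp: Get the use edge for an operand of a binary node.
//
// Arguments:
//    operand - the node to find the use for
//    pUse    - [out] parameter for the use
//
// Return Value:
//    Whether "operand" is op1 or op2 of this node; if so, "*pUse" is set
//    to the corresponding use edge.
//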
bool GenTree::TryGetUseBinOp(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
assert(this->OperIsBinary());
GenTreeOp* const binOp = this->AsOp();
if (operand == binOp->gtOp1)
{
*pUse = &binOp->gtOp1;
return true;
}
if (operand == binOp->gtOp2)
{
*pUse = &binOp->gtOp2;
return true;
}
return false;
}
//------------------------------------------------------------------------
// GenTree::ReplaceOperand:
// Replace a given operand of this node with a new operand. If the
// current node is a call node, this will also update the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTree::ReplaceOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
if (OperGet() == GT_CALL)
{
AsCall()->ReplaceCallOperand(useEdge, replacement);
}
else
{
*useEdge = replacement;
}
}
//------------------------------------------------------------------------
// gtGetParent: Get the parent of this node, and optionally capture the
// pointer to the child so that it can be modified.
//
// Arguments:
// pUse - A pointer to a GenTree** (yes, that's three
// levels, i.e. GenTree ***), which if non-null,
// will be set to point to the field in the parent
// that points to this node.
//
// Return value
// The parent of this node.
//
// Notes:
// This requires that the execution order must be defined (i.e. gtSetEvalOrder() has been called).
// To enable the child to be replaced, it accepts an argument, "pUse", that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
//
GenTree* GenTree::gtGetParent(GenTree*** pUse)
{
// Find the parent node; it must be after this node in the execution order.
GenTree* user;
GenTree** use = nullptr;
for (user = gtNext; user != nullptr; user = user->gtNext)
{
if (user->TryGetUse(this, &use))
{
break;
}
}
if (pUse != nullptr)
{
*pUse = use;
}
return user;
}
//-------------------------------------------------------------------------
// gtRetExprVal - walk back through GT_RET_EXPRs
//
// Arguments:
// pbbFlags - out-parameter that is set to the flags of the basic block
// containing the inlinee return value. The value is 0
// for unsuccessful inlines.
//
// Returns:
// tree representing return value from a successful inline,
// or original call for failed or yet to be determined inline.
//
// Notes:
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
//
GenTree* GenTree::gtRetExprVal(BasicBlockFlags* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
BasicBlockFlags bbFlags = BBF_EMPTY;
assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
while (retExprVal->OperIs(GT_RET_EXPR))
{
const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
bbFlags = retExpr->bbFlags;
retExprVal = retExpr->gtInlineCandidate;
}
if (pbbFlags != nullptr)
{
*pbbFlags = bbFlags;
}
return retExprVal;
}
//------------------------------------------------------------------------------
// OperRequiresAsgFlag : Check whether the operation requires GTF_ASG flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresAsgFlag()
{
if (OperIs(GT_ASG, GT_STORE_DYN_BLK) ||
OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
{
return true;
}
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryStore())
{
// A MemoryStore operation is an assignment
return true;
}
}
#endif // FEATURE_HW_INTRINSICS
if (gtOper == GT_CALL)
{
// If the call has a return buffer argument, it produces a definition and hence
// should be marked with assignment.
return AsCall()->GetLclRetBufArgNode() != nullptr;
}
return false;
}
//------------------------------------------------------------------------------
// OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresCallFlag(Compiler* comp)
{
switch (gtOper)
{
case GT_CALL:
return true;
case GT_KEEPALIVE:
return true;
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT)
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// Variable shifts of a long end up being helper calls, so mark the tree as such in morph.
// This is potentially too conservative, since they'll get treated as having side effects.
// It is important to mark them as calls so if they are part of an argument list,
// they will get sorted and processed properly (for example, it is important to handle
// all nested calls before putting struct arguments in the argument registers). We
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperIsImplicitIndir : Check whether the operation contains an implicit
// indirection.
// Arguments:
// this - a GenTree node
//
// Return Value:
// True if the given node contains an implicit indirection
//
// Note that for the [HW]INTRINSIC nodes we have to examine the
// details of the node to determine its result.
//
bool GenTree::OperIsImplicitIndir() const
{
switch (gtOper)
{
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
case GT_ARR_OFFSET:
return true;
case GT_INTRINSIC:
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
#ifdef FEATURE_SIMD
case GT_SIMD:
{
return AsSIMD()->OperIsMemoryLoad();
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
return AsHWIntrinsic()->OperIsMemoryLoadOrStore();
}
#endif // FEATURE_HW_INTRINSICS
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperMayThrow : Check whether the operation may throw.
//
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if the given operator may cause an exception
bool GenTree::OperMayThrow(Compiler* comp)
{
GenTree* op;
switch (gtOper)
{
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
/* Division with a non-zero, non-minus-one constant does not throw an exception */
op = AsOp()->gtOp2;
if (varTypeIsFloating(op->TypeGet()))
{
return false; // Floating point division does not throw.
}
// For integers only division by 0 or by -1 can throw
if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
{
return false;
}
return true;
case GT_INTRINSIC:
// If this is an intrinsic that represents object.GetType(), it can throw a NullReferenceException.
// Currently, this is the only intrinsic that can throw an exception.
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
case GT_CALL:
CorInfoHelpFunc helper;
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
case GT_IND:
case GT_BLK:
case GT_OBJ:
case GT_NULLCHECK:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) && comp->fgAddrCouldBeNull(this->AsIndir()->Addr()));
case GT_ARR_LENGTH:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) &&
comp->fgAddrCouldBeNull(this->AsArrLen()->ArrRef()));
case GT_ARR_ELEM:
return comp->fgAddrCouldBeNull(this->AsArrElem()->gtArrObj);
case GT_FIELD:
{
GenTree* fldObj = this->AsField()->GetFldObj();
if (fldObj != nullptr)
{
return comp->fgAddrCouldBeNull(fldObj);
}
return false;
}
case GT_BOUNDS_CHECK:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_INDEX_ADDR:
return true;
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
assert(hwIntrinsicNode != nullptr);
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
{
// This operation contains an implicit indirection;
// it could throw a null reference exception.
//
return true;
}
break;
}
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetFieldCount: Return the register count for a multi-reg lclVar.
//
// Arguments:
// compiler - the current Compiler instance.
//
// Return Value:
// Returns the number of registers defined by this node.
//
// Notes:
// This must be a multireg lclVar.
//
unsigned int GenTreeLclVar::GetFieldCount(Compiler* compiler) const
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
return varDsc->lvFieldCnt;
}
//-----------------------------------------------------------------------------------
// GetFieldTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// compiler - the current Compiler instance.
// idx - which register type to return.
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg lclVar and 'regIndex' must be a valid index for this node.
//
var_types GenTreeLclVar::GetFieldTypeByIndex(Compiler* compiler, unsigned idx)
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + idx);
assert(fieldVarDsc->TypeGet() != TYP_STRUCT); // Don't expect struct fields.
return fieldVarDsc->TypeGet();
}
#if DEBUGGABLE_GENTREE
// static
GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
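//------------------------------------------------------------------------
// GetVtableForOper: Return (and cache) the vtable pointer of the most
//    derived GenTree subtype used to represent the given oper.
//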
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
noway_assert(oper < GT_COUNT);
// First, check a cache.
if (s_vtablesForOpers[oper] != nullptr)
{
return s_vtablesForOpers[oper];
}
// Otherwise, look up the correct vtable entry. Note that we want the most derived GenTree subtype
// for an oper. E.g., GT_LCL_VAR is defined in GTSTRUCT_3 as GenTreeLclVar and in GTSTRUCT_N as
// GenTreeLclVarCommon. We want the GenTreeLclVar vtable, since nothing should actually be
// instantiated as a GenTreeLclVarCommon.
VtablePtr res = nullptr;
switch (oper)
{
// clang-format off
#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
#define GTSTRUCT_1(nm, tag) \
case tag: \
{ \
GenTree##nm gt; \
        res = *reinterpret_cast<VtablePtr*>(&gt); \
} \
break;
#define GTSTRUCT_2(nm, tag, tag2) \
case tag: \
case tag2: \
{ \
GenTree##nm gt; \
        res = *reinterpret_cast<VtablePtr*>(&gt); \
} \
break;
#define GTSTRUCT_3(nm, tag, tag2, tag3) \
case tag: \
case tag2: \
case tag3: \
{ \
GenTree##nm gt; \
        res = *reinterpret_cast<VtablePtr*>(&gt); \
} \
break;
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) \
case tag: \
case tag2: \
case tag3: \
case tag4: \
{ \
GenTree##nm gt; \
        res = *reinterpret_cast<VtablePtr*>(&gt); \
} \
break;
#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#define GTSTRUCT_2_SPECIAL(nm, tag, tag2) /*handle explicitly*/
#define GTSTRUCT_3_SPECIAL(nm, tag, tag2, tag3) /*handle explicitly*/
#include "gtstructs.h"
// clang-format on
// Handle the special cases.
// The following opers are in GTSTRUCT_N but no other place (namely, no subtypes).
case GT_STORE_BLK:
case GT_BLK:
{
GenTreeBlk gt;
            res = *reinterpret_cast<VtablePtr*>(&gt);
}
break;
case GT_IND:
case GT_NULLCHECK:
{
GenTreeIndir gt;
            res = *reinterpret_cast<VtablePtr*>(&gt);
}
break;
// We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified
// in their proper subtype. Similarly for GenTreeIndir.
default:
{
// Should be unary or binary op.
if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
GenTreeIntCon dummyOp(TYP_INT, 0);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
                s_vtableForOp = *reinterpret_cast<VtablePtr*>(&gt);
}
res = s_vtableForOp;
break;
}
}
s_vtablesForOpers[oper] = res;
return res;
}
void GenTree::SetVtableForOper(genTreeOps oper)
{
*reinterpret_cast<VtablePtr*>(this) = GetVtableForOper(oper);
}
#endif // DEBUGGABLE_GENTREE
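//------------------------------------------------------------------------
// gtNewOperNode: Create a new node for a binary operator.
//
// Arguments:
//    oper - The operator; must not be one that requires an extended GenTreeOp subtype
//    type - The type of the node
//    op1  - The first operand
//    op2  - The second operand
//
// Return Value:
//    The newly created GenTreeOp node.
//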
GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
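//------------------------------------------------------------------------
// gtNewColonNode: Create a new GT_COLON node holding the two arms of a QMARK.
//
// Arguments:
//    type     - The type of the node (note: the node is currently always created as TYP_INT)
//    elseNode - The "else" arm
//    thenNode - The "then" arm
//
// Return Value:
//    The newly created GenTreeColon node.
//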
GenTreeColon* Compiler::gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode)
{
return new (this, GT_COLON) GenTreeColon(TYP_INT, elseNode, thenNode);
}
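//------------------------------------------------------------------------
// gtNewQmarkNode: Create a new GT_QMARK node from a condition and a GT_COLON.
//
// Arguments:
//    type  - The type of the node
//    cond  - The condition expression
//    colon - The GT_COLON node holding the two result arms
//
// Return Value:
//    The newly created GenTreeQmark node. Also records that the method uses
//    QMARKs and, in DEBUG, checks the allowed form once QMARKs have been
//    rationalized.
//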
GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon)
{
compQmarkUsed = true;
GenTreeQmark* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon);
#ifdef DEBUG
if (compQmarkRationalized)
{
fgCheckQmarkAllowedForm(result);
}
#endif
return result;
}
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
GenTreeIntCon* Compiler::gtNewNull()
{
return gtNewIconNode(0, TYP_REF);
}
GenTreeIntCon* Compiler::gtNewTrue()
{
return gtNewIconNode(1, TYP_INT);
}
GenTreeIntCon* Compiler::gtNewFalse()
{
return gtNewIconNode(0, TYP_INT);
}
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
{
GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
return node;
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
GenTree* Compiler::gtNewJmpTableNode()
{
return new (this, GT_JMPTABLE) GenTree(GT_JMPTABLE, TYP_I_IMPL);
}
/*****************************************************************************
*
 *  Converts an annotated token into icon flags (so that we will later be
* able to tell the type of the handle that will be embedded in the icon
* node)
*/
GenTreeFlags Compiler::gtTokenToIconFlags(unsigned token)
{
GenTreeFlags flags = GTF_EMPTY;
switch (TypeFromToken(token))
{
case mdtTypeRef:
case mdtTypeDef:
case mdtTypeSpec:
flags = GTF_ICON_CLASS_HDL;
break;
case mdtMethodDef:
flags = GTF_ICON_METHOD_HDL;
break;
case mdtFieldDef:
flags = GTF_ICON_FIELD_HDL;
break;
default:
flags = GTF_ICON_TOKEN_HDL;
break;
}
return flags;
}
//-----------------------------------------------------------------------------------------
// gtNewIndOfIconHandleNode: Creates an indirection GenTree node of a constant handle
//
// Arguments:
// indType - The type returned by the indirection node
// addr - The constant address to read from
// iconFlags - The GTF_ICON flag value that specifies the kind of handle that we have
// isInvariant - The indNode should also be marked as invariant
//
// Return Value:
//    Returns a GT_IND node representing the value at the address provided by 'addr'
//
// Notes:
// The GT_IND node is marked as non-faulting
//    If the indirection is not invariant, we also mark the indNode as GTF_GLOB_REF
//
GenTree* Compiler::gtNewIndOfIconHandleNode(var_types indType, size_t addr, GenTreeFlags iconFlags, bool isInvariant)
{
GenTree* addrNode = gtNewIconHandleNode(addr, iconFlags);
GenTree* indNode = gtNewOperNode(GT_IND, indType, addrNode);
// This indirection won't cause an exception.
//
indNode->gtFlags |= GTF_IND_NONFAULTING;
if (isInvariant)
{
assert(iconFlags != GTF_ICON_STATIC_HDL); // Pointer to a mutable class Static variable
assert(iconFlags != GTF_ICON_BBC_PTR); // Pointer to a mutable basic block count value
assert(iconFlags != GTF_ICON_GLOBAL_PTR); // Pointer to mutable data from the VM state
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
if (iconFlags == GTF_ICON_STR_HDL)
{
// String literals are never null
indNode->gtFlags |= GTF_IND_NONNULL;
}
}
else
{
// GLOB_REF needs to be set for indirections returning values from mutable
// locations, so that e. g. args sorting does not reorder them with calls.
indNode->gtFlags |= GTF_GLOB_REF;
}
return indNode;
}
/*****************************************************************************
*
 *  Allocates an integer constant entry that represents a HANDLE to something.
 *  It may not be allowed to embed HANDLEs directly into the JITed code (e.g.,
* as arguments to JIT helpers). Get a corresponding value that can be embedded.
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
GenTree* Compiler::gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags iconFlags, void* compileTimeHandle)
{
GenTree* iconNode;
GenTree* handleNode;
if (value != nullptr)
{
// When 'value' is non-null, pValue is required to be null
assert(pValue == nullptr);
// use 'value' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)value, iconFlags);
// 'value' is the handle
handleNode = iconNode;
}
else
{
// When 'value' is null, pValue is required to be non-null
assert(pValue != nullptr);
// use 'pValue' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)pValue, iconFlags);
// 'pValue' is an address of a location that contains the handle
// construct the indirection of 'pValue'
handleNode = gtNewOperNode(GT_IND, TYP_I_IMPL, iconNode);
// This indirection won't cause an exception.
handleNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
handleNode->gtFlags |= GTF_IND_INVARIANT;
}
iconNode->AsIntCon()->gtCompileTimeHandle = (size_t)compileTimeHandle;
return handleNode;
}
/*****************************************************************************/
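//------------------------------------------------------------------------
// gtNewStringLiteralNode: Create a tree that yields a string literal object reference.
//
// Arguments:
//    iat    - How the VM requires the literal to be accessed (IAT_VALUE,
//             IAT_PVALUE or IAT_PPVALUE)
//    pValue - The handle, or the address (possibly double-indirect) of the handle
//
// Return Value:
//    A TYP_REF tree: a frozen string handle for IAT_VALUE, or one/two levels
//    of indirection for IAT_PVALUE/IAT_PPVALUE.
//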
GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
GenTree* tree = nullptr;
switch (iat)
{
case IAT_VALUE:
setMethodHasFrozenString();
tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL, nullptr);
tree->gtType = TYP_REF;
#ifdef DEBUG
tree->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PVALUE: // The value needs to be accessed via an indirection
// Create an indirection
tree = gtNewIndOfIconHandleNode(TYP_REF, (size_t)pValue, GTF_ICON_STR_HDL, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PPVALUE: // The value needs to be accessed via a double indirection
// Create the first indirection
tree = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pValue, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
// Create the second indirection
tree = gtNewOperNode(GT_IND, TYP_REF, tree);
// This indirection won't cause an exception.
tree->gtFlags |= GTF_IND_NONFAULTING;
            // This indirection points into the global heap (it is a String object)
tree->gtFlags |= GTF_GLOB_REF;
break;
default:
noway_assert(!"Unexpected InfoAccessType");
}
return tree;
}
//------------------------------------------------------------------------
// gtNewStringLiteralLength: create GenTreeIntCon node for the given string
// literal to store its length.
//
// Arguments:
// node - string literal node.
//
// Return Value:
// GenTreeIntCon node with string's length as a value or null.
//
GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
{
if (node->IsStringEmptyField())
{
JITDUMP("Folded String.Empty.Length to 0\n");
return gtNewIconNode(0);
}
int length = -1;
const char16_t* str = info.compCompHnd->getStringLiteral(node->gtScpHnd, node->gtSconCPX, &length);
if (length >= 0)
{
GenTreeIntCon* iconNode = gtNewIconNode(length);
// str can be NULL for dynamic context
if (str != nullptr)
{
JITDUMP("Folded '\"%ws\".Length' to '%d'\n", str, length)
}
else
{
JITDUMP("Folded 'CNS_STR.Length' to '%d'\n", length)
}
return iconNode;
}
return nullptr;
}
/*****************************************************************************/
GenTree* Compiler::gtNewLconNode(__int64 value)
{
#ifdef TARGET_64BIT
GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
GenTree* Compiler::gtNewDconNode(double value, var_types type)
{
GenTree* node = new (this, GT_CNS_DBL) GenTreeDblCon(value, type);
return node;
}
GenTree* Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
// 'GT_CNS_STR' nodes later get transformed into 'GT_CALL'
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
GenTree* node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
return node;
}
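//------------------------------------------------------------------------
// gtNewZeroConNode: Create a constant node with the value zero of the given type.
//
// Arguments:
//    type - The type of the constant (TYP_INT, TYP_REF, TYP_BYREF, TYP_LONG,
//           TYP_FLOAT or TYP_DOUBLE)
//
// Return Value:
//    The newly created constant node.
//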
GenTree* Compiler::gtNewZeroConNode(var_types type)
{
GenTree* zero;
switch (type)
{
case TYP_INT:
zero = gtNewIconNode(0);
break;
case TYP_BYREF:
FALLTHROUGH;
case TYP_REF:
zero = gtNewIconNode(0);
zero->gtType = type;
break;
case TYP_LONG:
zero = gtNewLconNode(0);
break;
case TYP_FLOAT:
zero = gtNewDconNode(0.0);
zero->gtType = type;
break;
case TYP_DOUBLE:
zero = gtNewDconNode(0.0);
break;
default:
noway_assert(!"Bad type in gtNewZeroConNode");
zero = nullptr;
break;
}
return zero;
}
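//------------------------------------------------------------------------
// gtNewOneConNode: Create a constant node with the value one of the given type.
//
// Arguments:
//    type - The type of the constant (integral or floating-point)
//
// Return Value:
//    The newly created constant node.
//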
GenTree* Compiler::gtNewOneConNode(var_types type)
{
GenTree* one;
switch (type)
{
case TYP_INT:
case TYP_UINT:
one = gtNewIconNode(1);
break;
case TYP_LONG:
case TYP_ULONG:
one = gtNewLconNode(1);
break;
case TYP_FLOAT:
case TYP_DOUBLE:
one = gtNewDconNode(1.0);
one->gtType = type;
break;
default:
noway_assert(!"Bad type in gtNewOneConNode");
one = nullptr;
break;
}
return one;
}
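//------------------------------------------------------------------------
// gtNewStoreLclVar: Create a GT_STORE_LCL_VAR node that stores 'src' into a local.
//
// Arguments:
//    dstLclNum - The number of the local being stored to
//    src       - The value being stored
//
// Return Value:
//    The newly created GenTreeLclVar store node; it is marked as a definition
//    and inherits the source's common flags.
//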
GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
{
GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, src->TypeGet(), dstLclNum);
store->gtOp1 = src;
store->gtFlags = (src->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF | GTF_ASG;
return store;
}
#ifdef FEATURE_SIMD
//---------------------------------------------------------------------
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
// simdType - simd vector type
// simdBaseJitType - element type of vector
// simdSize - size of vector in bytes
GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
GenTree* initVal = gtNewZeroConNode(simdBaseType);
initVal->gtType = simdBaseType;
return gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
GenTreeCall* Compiler::gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, di);
}
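//------------------------------------------------------------------------
// gtNewCallNode: Create a new GT_CALL node.
//
// Arguments:
//    callType - The kind of call (user, helper or indirect)
//    callHnd  - The method handle (or, for CT_INDIRECT, the call address cast to a handle)
//    type     - The return type of the call
//    args     - The argument list
//    di       - Debug info to associate with the call
//
// Return Value:
//    The newly created GenTreeCall node, with side-effect flags propagated
//    from its arguments.
//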
GenTreeCall* Compiler::gtNewCallNode(
gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
#ifdef UNIX_X86_ABI
if (callType == CT_INDIRECT || callType == CT_HELPER)
node->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
node->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
node->gtCallThisArg = nullptr;
node->fgArgInfo = nullptr;
INDEBUG(node->callSig = nullptr;)
node->tailCallInfo = nullptr;
node->gtRetClsHnd = nullptr;
node->gtControlExpr = nullptr;
node->gtCallMoreFlags = GTF_CALL_M_EMPTY;
if (callType == CT_INDIRECT)
{
node->gtCallCookie = nullptr;
}
else
{
node->gtInlineCandidateInfo = nullptr;
}
node->gtCallLateArgs = nullptr;
node->gtReturnType = type;
#ifdef FEATURE_READYTORUN
node->gtEntryPoint.addr = nullptr;
node->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
node->gtRawILOffset = BAD_IL_OFFSET;
node->gtInlineContext = compInlineContext;
#endif
    // Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
    //
    // Implementation note: if not generating MRV info, genCallSite2DebugInfoMap will be NULL and
    // codegen will pass DebugInfo() to the emitter, which will cause the emitter
    // not to emit an IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
{
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
// TODO-Cleanup:
// a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
if (genCallSite2DebugInfoMap == nullptr)
{
genCallSite2DebugInfoMap = new (getAllocator()) CallSiteDebugInfoTable(getAllocator());
}
// Make sure that there are no duplicate entries for a given call node
assert(!genCallSite2DebugInfoMap->Lookup(node));
genCallSite2DebugInfoMap->Set(node, di);
}
// Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
node->ClearOtherRegFlags();
#if !defined(TARGET_64BIT)
if (varTypeIsLong(node))
{
assert(node->gtReturnType == node->gtType);
// Initialize Return type descriptor of call node
node->InitializeLongReturnType();
}
#endif // !defined(TARGET_64BIT)
return node;
}
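//------------------------------------------------------------------------
// gtNewLclvNode: Create a GT_LCL_VAR node referencing the given local.
//
// Arguments:
//    lnum - The number of the local variable
//    type - The type of the reference (checked against the local's declared type)
//    offs - (DEBUG only) the IL offset associated with the reference
//
// Return Value:
//    The newly created GenTreeLclVar node.
//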
GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
assert(type != TYP_VOID);
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
LclVarDsc* varDsc = lvaGetDesc(lnum);
bool simd12ToSimd16Widening = false;
#if FEATURE_SIMD
// We can additionally have a SIMD12 that was widened to a SIMD16, generally as part of lowering
simd12ToSimd16Widening = (type == TYP_SIMD16) && (varDsc->lvType == TYP_SIMD12);
#endif
assert((type == varDsc->lvType) || simd12ToSimd16Widening ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (varDsc->lvType == TYP_BYREF)));
}
GenTreeLclVar* node = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs));
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
return node;
}
GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
assert(type == lvaTable[lnum].lvType ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (lvaTable[lnum].lvType == TYP_BYREF)));
}
// This local variable node may later get transformed into a large node
assert(GenTree::s_gtNodeSizes[LargeOpOpcode()] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
GenTreeLclVar* node =
new (this, LargeOpOpcode()) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs) DEBUGARG(/*largeNode*/ true));
return node;
}
GenTreeLclVar* Compiler::gtNewLclVarAddrNode(unsigned lclNum, var_types type)
{
GenTreeLclVar* node = new (this, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, type, lclNum);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, type, lclNum, lclOffs);
node->SetFieldSeq(fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq);
return node;
}
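//------------------------------------------------------------------------
// gtNewLclFldNode: Create a GT_LCL_FLD node referencing a field of a local.
//
// Arguments:
//    lnum   - The number of the local variable
//    type   - The type of the field reference
//    offset - The offset of the field within the local
//
// Return Value:
//    The newly created GenTreeLclFld node, with its field sequence set to NotAField.
//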
GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
node->SetFieldSeq(FieldSeqStore::NotAField());
return node;
}
GenTree* Compiler::gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
GenTreeRetExpr* node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
node->gtInlineCandidate = inlineCandidate;
node->bbFlags = bbFlags;
if (varTypeIsStruct(inlineCandidate) && !inlineCandidate->OperIsBlkOp())
{
node->gtRetClsHnd = gtGetStructHandle(inlineCandidate);
}
// GT_RET_EXPR node eventually might be bashed back to GT_CALL (when inlining is aborted for example).
// Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
    // For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before emptying the evaluation stack.
node->gtFlags |= GTF_CALL;
return node;
}
GenTreeCall::Use* Compiler::gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node, args);
}
GenTreeCall::Use* Compiler::gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after)
{
after->SetNext(new (this, CMK_ASTNode) GenTreeCall::Use(node, after->GetNext()));
return after->GetNext();
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node);
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3, node4));
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching argNum and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
return argInfo->GetArgEntry(argNum);
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching node and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByNode(GenTreeCall* call, GenTree* node)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->GetNode() == node)
{
return curArgTabEntry;
}
else if (curArgTabEntry->use->GetNode() == node)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
/*****************************************************************************
*
* Find and return the entry with the given "lateArgInx". Requires that one is found
* (asserts this).
*/
fgArgTabEntry* Compiler::gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
assert(lateArgInx != UINT_MAX);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->isLateArg() && curArgTabEntry->GetLateArgInx() == lateArgInx)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
//------------------------------------------------------------------------
// gtArgNodeByLateArgInx: Given a call instruction, find the argument with the given
// late arg index (i.e. the given position in the gtCallLateArgs list).
// Arguments:
// call - the call node
// lateArgInx - the index into the late args list
//
// Return value:
// The late argument node.
//
GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
{
GenTree* argx = nullptr;
unsigned regIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
argx = use.GetNode();
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == lateArgInx)
{
break;
}
regIndex++;
}
noway_assert(argx != nullptr);
return argx;
}
/*****************************************************************************
*
* Create a node that will assign 'src' to 'dst'.
*/
GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
assert(!src->TypeIs(TYP_VOID));
/* Mark the target as being assigned */
if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD))
{
dst->gtFlags |= GTF_VAR_DEF;
if (dst->IsPartialLclFld(this))
{
// We treat these partial writes as combined uses and defs.
dst->gtFlags |= GTF_VAR_USEASG;
}
}
dst->gtFlags |= GTF_DONT_CSE;
#if defined(FEATURE_SIMD) && !defined(TARGET_X86)
// TODO-CQ: x86 Windows supports multi-reg returns but not SIMD multi-reg returns
if (varTypeIsSIMD(dst->gtType))
{
// We want to track SIMD assignments as being intrinsics since they
// are functionally SIMD `mov` instructions and are more efficient
// when we don't promote, particularly when it occurs due to inlining
SetOpLclRelatedToSIMDIntrinsic(dst);
SetOpLclRelatedToSIMDIntrinsic(src);
}
#endif // FEATURE_SIMD
/* Create the assignment node */
GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
asg->gtFlags |= GTF_ASG;
return asg;
}
//------------------------------------------------------------------------
// gtNewObjNode: Creates a new Obj node.
//
// Arguments:
// structHnd - The class handle of the struct type.
// addr - The address of the struct.
//
// Return Value:
// Returns a node representing the struct value at the given address.
//
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
// An Obj is not a global reference, if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
{
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->GetLclNum()))
{
objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
return objNode;
}
//------------------------------------------------------------------------
// gtSetObjGcInfo: Set the GC info on an object node
//
// Arguments:
// objNode - The object node of interest
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
assert(varTypeIsStruct(objNode->TypeGet()));
assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
if (!objNode->GetLayout()->HasGCPtr())
{
objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
}
//------------------------------------------------------------------------
// gtNewStructVal: Return a node that represents a struct value
//
// Arguments:
// structHnd - The class for the struct
// addr - The address of the struct
//
// Return Value:
// A block, object or local node that represents the struct value pointed to by 'addr'.
GenTree* Compiler::gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
if (val->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = addr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = &(lvaTable[lclNum]);
if (varTypeIsStruct(varDsc) && (varDsc->GetStructHnd() == structHnd) && !lvaIsImplicitByRefLocal(lclNum))
{
return addr->gtGetOp1();
}
}
}
return gtNewObjNode(structHnd, addr);
}
//------------------------------------------------------------------------
// gtNewBlockVal: Return a node that represents a possibly untyped block value
//
// Arguments:
// addr - The address of the block
// size - The size of the block
//
// Return Value:
// A block, object or local node that represents the block value pointed to by 'addr'.
GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size)
{
// By default we treat this as an opaque struct type with known size.
var_types blkType = TYP_STRUCT;
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
#if FEATURE_SIMD
if (varTypeIsSIMD(val) && (genTypeSize(val) == size))
{
blkType = val->TypeGet();
}
#endif // FEATURE_SIMD
if (varTypeIsStruct(val) && val->OperIs(GT_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(val->AsLclVarCommon());
unsigned varSize = varTypeIsStruct(varDsc) ? varDsc->lvExactSize : genTypeSize(varDsc);
if (varSize == size)
{
return val;
}
}
}
return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
// Parameters (exactly the same as MSIL CpObj):
//
// dstAddr - The target to copy the struct to
// srcAddr - The source to copy the struct from
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
GenTree* lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
if (lhs->OperIs(GT_OBJ))
{
GenTreeObj* lhsObj = lhs->AsObj();
#if DEBUG
// Codegen for CpObj assumes that we cannot have a struct with GC pointers whose size is not a multiple
// of the register size. The EE currently does not allow this to ensure that GC pointers are aligned
// if the struct is stored in an array. Note that this restriction doesn't apply to stack-allocated objects:
// they are never stored in arrays. We should never get to this method with stack-allocated objects since they
// are never copied so we don't need to exclude them from the assert below.
// Let's assert it just to be safe.
ClassLayout* layout = lhsObj->GetLayout();
unsigned size = layout->GetSize();
assert((layout->GetGCPtrCount() == 0) || (roundUp(size, REGSIZE_BYTES) == size));
#endif
gtSetObjGcInfo(lhsObj);
}
if (srcAddr->OperGet() == GT_ADDR)
{
src = srcAddr->AsOp()->gtOp1;
}
else
{
src = gtNewOperNode(GT_IND, lhs->TypeGet(), srcAddr);
}
GenTree* result = gtNewBlkOpNode(lhs, src, isVolatile, true);
return result;
}
//------------------------------------------------------------------------
// FixupInitBlkValue: Fixup the init value for an initBlk operation
//
// Arguments:
// asgType - The type of assignment that the initBlk is being transformed into
//
// Return Value:
// Modifies the constant value on this node to be the appropriate "fill"
// value for the initblk.
//
// Notes:
// The initBlk MSIL instruction takes a byte value, which must be
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
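//    For example (illustrative only), a fill byte of 0x3F with asgType == TYP_INT
//    is widened by the byte replication below to the constant 0x3F3F3F3F.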
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
size_t cns = gtIconVal;
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
cns |= cns << 16;
#ifdef TARGET_64BIT
if (size == 8)
{
cns |= cns << 32;
}
#endif // TARGET_64BIT
// Make the type match for evaluation types.
gtType = asgType;
// if we are initializing a GC type the value being assigned must be zero (null).
assert(!varTypeIsGC(asgType) || (cns == 0));
}
gtIconVal = cns;
}
}
//----------------------------------------------------------------------------
// UsesDivideByConstOptimized:
// returns true if rationalize will use the division by constant
// optimization for this node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
// Return Value:
//    Return true iff the node is a GT_DIV, GT_UDIV, GT_MOD or GT_UMOD with
// an integer constant and we can perform the division operation using
// a reciprocal multiply or a shift operation.
//
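// Example (illustrative only): for "x / 3" with TYP_INT operands on XARCH/ARM64
// and optimizations enabled, this returns true, and the division can be performed
// with a reciprocal ("magic number") multiply instead of a divide instruction.
//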
bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
{
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (!OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD))
{
return false;
}
#if defined(TARGET_ARM64)
if (OperIs(GT_MOD, GT_UMOD))
{
// MOD, UMOD not supported for ARM64
return false;
}
#endif // TARGET_ARM64
bool isSignedDivide = OperIs(GT_DIV, GT_MOD);
GenTree* dividend = gtGetOp1()->gtEffectiveVal(/*commaOnly*/ true);
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
ssize_t divisorValue;
if (divisor->IsCnsIntOrI())
{
divisorValue = static_cast<ssize_t>(divisor->AsIntCon()->IconValue());
}
else
{
ValueNum vn = divisor->gtVNPair.GetLiberal();
if (comp->vnStore->IsVNConstant(vn))
{
divisorValue = comp->vnStore->CoercedConstantValue<ssize_t>(vn);
}
else
{
return false;
}
}
const var_types divType = TypeGet();
if (divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
return false;
}
else if (isSignedDivide)
{
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
return false;
}
else if (isPow2(divisorValue))
{
return true;
}
}
else // unsigned divide
{
if (divType == TYP_INT)
{
// Clear up the upper 32 bits of the value, they may be set to 1 because constants
// are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
divisorValue &= UINT32_MAX;
}
size_t unsignedDivisorValue = (size_t)divisorValue;
if (isPow2(unsignedDivisorValue))
{
return true;
}
}
const bool isDiv = OperIs(GT_DIV, GT_UDIV);
if (isDiv)
{
if (isSignedDivide)
{
// If the divisor is the minimum representable integer value then the result is either 0 or 1
if ((divType == TYP_INT && divisorValue == INT_MIN) || (divType == TYP_LONG && divisorValue == INT64_MIN))
{
return true;
}
}
else
{
            // If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide))
{
// All checks pass we can perform the division operation using a reciprocal multiply.
return true;
}
#endif
return false;
}
//------------------------------------------------------------------------
// CheckDivideByConstOptimized:
//      Checks if we can use the division by constant optimization on this node
//      and, if so, sets the GTF_DIV_BY_CNS_OPT flag and sets GTF_DONT_CSE on
//      the constant node
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
{
if (UsesDivideByConstOptimized(comp))
{
gtFlags |= GTF_DIV_BY_CNS_OPT;
// Now set DONT_CSE on the GT_CNS_INT divisor, note that
        // with ValueNumbering we can have a non-GT_CNS_INT divisor
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
if (divisor->OperIs(GT_CNS_INT))
{
divisor->gtFlags |= GTF_DONT_CSE;
}
}
}
//
//------------------------------------------------------------------------
// gtBlockOpInit: Initializes a BlkOp GenTree
//
// Arguments:
// result - an assignment node that is to be initialized.
// dst - the target (destination) we want to either initialize or copy to.
// src - the init value for InitBlk or the source struct for CpBlk/CpObj.
// isVolatile - specifies whether this node is a volatile memory operation.
//
// Assumptions:
// 'result' is an assignment that is newly constructed.
// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
//
// Notes:
// This procedure centralizes all the logic to both enforce proper structure and
// to properly construct any InitBlk/CpBlk node.
void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
if (!result->OperIsBlkOp())
{
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
    /* In the case of CpBlk, we want to avoid generating
     * nodes where the source and destination are the same,
     * for two reasons: first, it is useless; second, it
     * introduces issues in liveness, and copying memory
     * from an overlapping memory location is undefined
     * behavior both per the ECMA standard and per the
     * memcpy semantics.
     *
     * NOTE: In this case we'll only detect the case of the address of a local
     * and the local itself; any other complex expressions won't be
     * caught.
     *
     * TODO-Cleanup: though having this logic is beneficial (i.e. it avoids self-assignment
     * of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
     * isn't handling self-assignment of struct variables correctly. This issue may not
     * surface if struct promotion is ON (which is the case on x86/arm). But the
     * fundamental issue still exists and needs to be addressed.
     */
if (result->OperIsCopyBlkOp())
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperIsBlk() && (currSrc->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currSrc = currSrc->AsBlk()->Addr()->gtGetOp1();
}
if (currDst->OperIsBlk() && (currDst->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currDst = currDst->AsBlk()->Addr()->gtGetOp1();
}
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
// Make this a NOP
// TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
result->gtBashToNOP();
return;
}
}
// Propagate all effect flags from children
result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
{
// If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
// should be labeled as simd intrinsic related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
GenTree* src = srcOrFillVal;
if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
src = src->AsIndir()->Addr()->gtGetOp1();
}
#ifdef FEATURE_HW_INTRINSICS
if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
#else
if (src->OperGet() == GT_SIMD)
#endif // FEATURE_HW_INTRINSICS
{
if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
dst = dst->AsIndir()->Addr()->gtGetOp1();
}
if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
setLclRelatedToSIMDIntrinsic(dst);
}
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
// dst - The destination node: local var / block node.
//  srcOrFillVal  - The value to assign for CopyBlk, the integer "fill" for InitBlk
// isVolatile - Whether this is a volatile memory operation or not.
// isCopyBlock - True if this is a block copy (rather than a block init).
//
// Return Value:
// Returns the newly constructed and initialized block operation.
//
GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
{
assert(dst->OperIsBlk() || dst->OperIsLocal());
if (isCopyBlock)
{
if (srcOrFillVal->OperIsIndir() && (srcOrFillVal->gtGetOp1()->gtOper == GT_ADDR))
{
srcOrFillVal = srcOrFillVal->gtGetOp1()->gtGetOp1();
}
}
else
{
// InitBlk
assert(varTypeIsIntegral(srcOrFillVal));
if (varTypeIsStruct(dst))
{
if (!srcOrFillVal->IsIntegralConst(0))
{
srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
}
}
}
GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
//------------------------------------------------------------------------
// gtNewPutArgReg: Creates a new PutArgReg node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created PutArgReg node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/armel, GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg)
{
assert(arg != nullptr);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr);
if (type == TYP_LONG)
{
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
return node;
}
//------------------------------------------------------------------------
// gtNewBitCastNode: Creates a new BitCast node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created BitCast node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/arm, as GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
{
assert(arg != nullptr);
assert(type != TYP_STRUCT);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
}
//------------------------------------------------------------------------
// gtNewAllocObjNode: Helper to create an object allocation node.
//
// Arguments:
// pResolvedToken - Resolved token for the object being allocated
// useParent - true iff the token represents a child of the object's class
//
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
//
// Node creation can fail for inlinees when the type described by pResolvedToken
// can't be represented in jitted code. If this happens, this method will return
// nullptr.
//
GenTreeAllocObj* Compiler::gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent)
{
const bool mustRestoreHandle = true;
bool* const pRuntimeLookup = nullptr;
bool usingReadyToRunHelper = false;
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
GenTree* opHandle = impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, useParent);
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP lookup = {};
if (opts.IsReadyToRun())
{
helper = CORINFO_HELP_READYTORUN_NEW;
CORINFO_LOOKUP_KIND* const pGenericLookupKind = nullptr;
usingReadyToRunHelper =
info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
}
#endif
if (!usingReadyToRunHelper)
{
if (opHandle == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return nullptr;
}
}
bool helperHasSideEffects;
CorInfoHelpFunc helperTemp =
info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd, &helperHasSideEffects);
if (!usingReadyToRunHelper)
{
helper = helperTemp;
}
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate and return the new object for boxing
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
GenTreeAllocObj* allocObj =
gtNewAllocObjNode(helper, helperHasSideEffects, pResolvedToken->hClass, TYP_REF, opHandle);
#ifdef FEATURE_READYTORUN
if (usingReadyToRunHelper)
{
assert(lookup.addr != nullptr);
allocObj->gtEntryPoint = lookup;
}
#endif
return allocObj;
}
/*****************************************************************************
*
* Clones the given tree value and returns a copy of the given tree.
* If 'complexOK' is false, the cloning is only done provided the tree
* is not too complex (whatever that may mean);
* If 'complexOK' is true, we try slightly harder to clone the tree.
* In either case, NULL is returned if the tree cannot be cloned
*
* Note that there is the function gtCloneExpr() which does a more
* complete job if you can't handle this function failing.
*/
GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
{
GenTree* copy;
switch (tree->gtOper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = new (this, GT_CNS_INT)
GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
break;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
break;
case GT_LCL_VAR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVarCommon()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
break;
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = new (this, tree->OperGet())
GenTreeLclFld(tree->OperGet(), tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
break;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->gtType, tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
break;
default:
if (!complexOK)
{
return nullptr;
}
if (tree->gtOper == GT_FIELD)
{
GenTree* objp = nullptr;
if (tree->AsField()->GetFldObj() != nullptr)
{
objp = gtClone(tree->AsField()->GetFldObj(), false);
if (objp == nullptr)
{
return nullptr;
}
}
copy = gtNewFieldRef(tree->TypeGet(), tree->AsField()->gtFldHnd, objp, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
}
else if (tree->OperIs(GT_ADD, GT_SUB))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->OperIsLeaf() && op2->OperIsLeaf())
{
op1 = gtClone(op1);
if (op1 == nullptr)
{
return nullptr;
}
op2 = gtClone(op2);
if (op2 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(tree->OperGet(), tree->TypeGet(), op1, op2);
}
else
{
return nullptr;
}
}
else if (tree->gtOper == GT_ADDR)
{
GenTree* op1 = gtClone(tree->AsOp()->gtOp1);
if (op1 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
return nullptr;
}
break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
#if defined(DEBUG)
copy->gtDebugFlags |= tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK;
#endif // defined(DEBUG)
return copy;
}
//------------------------------------------------------------------------
// gtCloneExpr: Create a copy of `tree`, adding flags `addFlags`, mapping
// local `varNum` to int constant `varVal` if it appears at
// the root, and mapping uses of local `deepVarNum` to constant
// `deepVarVal` if they occur beyond the root.
//
// Arguments:
// tree - GenTree to create a copy of
// addFlags - GTF_* flags to add to the copied tree nodes
// varNum - lclNum to replace at the root, or ~0 for no root replacement
// varVal - If replacing at root, replace local `varNum` with IntCns `varVal`
// deepVarNum - lclNum to replace uses of beyond the root, or ~0 for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Return Value:
// A copy of the given tree with the replacements and added flags specified.
//
// Notes:
// Top-level callers should generally call the overload that doesn't have
// the explicit `deepVarNum` and `deepVarVal` parameters; those are used in
// recursive invocations to avoid replacing defs.
GenTree* Compiler::gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal)
{
if (tree == nullptr)
{
return nullptr;
}
/* Figure out what kind of a node we have */
genTreeOps oper = tree->OperGet();
unsigned kind = tree->OperKind();
GenTree* copy;
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy =
gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
goto DONE;
case GT_CNS_DBL:
copy = gtNewDconNode(tree->AsDblCon()->gtDconVal);
copy->gtType = tree->gtType; // keep the same type
goto DONE;
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
goto DONE;
case GT_LCL_VAR:
if (tree->AsLclVarCommon()->GetLclNum() == varNum)
{
copy = gtNewIconNode(varVal, tree->gtType);
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
copy->LabelIndex(this);
}
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVar()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
}
goto DONE;
case GT_LCL_FLD:
if (tree->AsLclFld()->GetLclNum() == varNum)
{
IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy =
new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
copy->gtFlags = tree->gtFlags;
}
goto DONE;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->TypeGet(), tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
goto DONE;
case GT_RET_EXPR:
// GT_RET_EXPR is unique node, that contains a link to a gtInlineCandidate node,
// that is part of another statement. We cannot clone both here and cannot
// create another GT_RET_EXPR that points to the same gtInlineCandidate.
NO_WAY("Cloning of GT_RET_EXPR node not supported");
goto DONE;
case GT_MEMORYBARRIER:
copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
goto DONE;
case GT_ARGPLACE:
copy = gtNewArgPlaceHolderNode(tree->gtType, tree->AsArgPlace()->gtArgPlaceClsHnd);
goto DONE;
case GT_FTN_ADDR:
copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->AsFptrVal()->gtFptrMethod);
#ifdef FEATURE_READYTORUN
copy->AsFptrVal()->gtEntryPoint = tree->AsFptrVal()->gtEntryPoint;
#endif
goto DONE;
case GT_CATCH_ARG:
case GT_NO_OP:
case GT_LABEL:
copy = new (this, oper) GenTree(oper, tree->gtType);
goto DONE;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_JMP:
copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1);
goto DONE;
case GT_LCL_VAR_ADDR:
copy = new (this, oper) GenTreeLclVar(oper, tree->TypeGet(), tree->AsLclVar()->GetLclNum());
goto DONE;
case GT_LCL_FLD_ADDR:
copy = new (this, oper)
GenTreeLclFld(oper, tree->TypeGet(), tree->AsLclFld()->GetLclNum(), tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
goto DONE;
default:
NO_WAY("Cloning of node not supported");
goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
/* These nodes sometimes get bashed to "fat" ones */
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
// In the implementation of gtNewLargeOperNode you have
// to give an oper that will create a small node,
// otherwise it asserts.
//
if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1,
tree->OperIsBinary() ? tree->AsOp()->gtOp2 : nullptr);
}
else // Always a large tree
{
if (tree->OperIsBinary())
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
else
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1);
}
}
break;
case GT_CAST:
copy = new (this, LargeOpOpcode())
GenTreeCast(tree->TypeGet(), tree->AsCast()->CastOp(), tree->IsUnsigned(),
tree->AsCast()->gtCastType DEBUGARG(/*largeNode*/ TRUE));
break;
case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
copy = new (this, GT_INDEX)
GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
case GT_INDEX_ADDR:
{
GenTreeIndexAddr* asIndAddr = tree->AsIndexAddr();
copy = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(asIndAddr->Arr(), asIndAddr->Index(), asIndAddr->gtElemType,
asIndAddr->gtStructElemClass, asIndAddr->gtElemSize, asIndAddr->gtLenOffset,
asIndAddr->gtElemOffset);
copy->AsIndexAddr()->gtIndRngFailBB = asIndAddr->gtIndRngFailBB;
}
break;
case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
copy = new (this, GT_ALLOCOBJ)
GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtHelperHasSideEffects,
asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
#ifdef FEATURE_READYTORUN
copy->AsAllocObj()->gtEntryPoint = asAllocObj->gtEntryPoint;
#endif
}
break;
case GT_RUNTIMELOOKUP:
{
GenTreeRuntimeLookup* asRuntimeLookup = tree->AsRuntimeLookup();
copy = new (this, GT_RUNTIMELOOKUP)
GenTreeRuntimeLookup(asRuntimeLookup->gtHnd, asRuntimeLookup->gtHndType, asRuntimeLookup->gtOp1);
}
break;
case GT_ARR_LENGTH:
copy = gtNewArrLen(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsArrLen()->ArrLenOffset(), nullptr);
break;
case GT_ARR_INDEX:
copy = new (this, GT_ARR_INDEX)
GenTreeArrIndex(tree->TypeGet(),
gtCloneExpr(tree->AsArrIndex()->ArrObj(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrIndex()->IndexExpr(), addFlags, deepVarNum, deepVarVal),
tree->AsArrIndex()->gtCurrDim, tree->AsArrIndex()->gtArrRank,
tree->AsArrIndex()->gtArrElemType);
break;
case GT_QMARK:
copy = new (this, GT_QMARK)
GenTreeQmark(tree->TypeGet(), tree->AsOp()->gtGetOp1(), tree->AsOp()->gtGetOp2()->AsColon());
break;
case GT_OBJ:
copy =
new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
break;
case GT_BLK:
copy = new (this, GT_BLK)
GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
break;
case GT_FIELD:
copy = new (this, GT_FIELD) GenTreeField(tree->TypeGet(), tree->AsField()->GetFldObj(),
tree->AsField()->gtFldHnd, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
break;
case GT_BOX:
copy = new (this, GT_BOX)
GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue,
tree->AsBox()->gtCopyStmtWhenInlinedBoxValue);
break;
case GT_INTRINSIC:
copy = new (this, GT_INTRINSIC)
GenTreeIntrinsic(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2,
tree->AsIntrinsic()->gtIntrinsicName, tree->AsIntrinsic()->gtMethodHandle);
#ifdef FEATURE_READYTORUN
copy->AsIntrinsic()->gtEntryPoint = tree->AsIntrinsic()->gtEntryPoint;
#endif
break;
case GT_BOUNDS_CHECK:
copy = new (this, GT_BOUNDS_CHECK)
GenTreeBoundsChk(tree->AsBoundsChk()->GetIndex(), tree->AsBoundsChk()->GetArrayLength(),
tree->AsBoundsChk()->gtThrowKind);
copy->AsBoundsChk()->gtIndRngFailBB = tree->AsBoundsChk()->gtIndRngFailBB;
break;
case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
copy = new (this, GT_LEA)
GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(), addrModeOp->gtScale,
static_cast<unsigned>(addrModeOp->Offset()));
}
break;
case GT_COPY:
case GT_RELOAD:
{
copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
default:
assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
// We're in the SimpleOp case, so it's always unary or binary.
if (GenTree::OperIsUnary(tree->OperGet()))
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, /*doSimplifications*/ false);
}
else
{
assert(GenTree::OperIsBinary(tree->OperGet()));
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
break;
}
// Some flags are conceptually part of the gtOper, and should be copied immediately.
if (tree->gtOverflowEx())
{
copy->gtFlags |= GTF_OVERFLOW;
}
if (tree->AsOp()->gtOp1)
{
if (tree->gtOper == GT_ASG)
{
// Don't replace varNum if it appears as the LHS of an assign.
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal);
}
else
{
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal);
}
}
if (tree->gtGetOp2IfPresent())
{
copy->AsOp()->gtOp2 = gtCloneExpr(tree->AsOp()->gtOp2, addFlags, deepVarNum, deepVarVal);
}
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
case GT_STOREIND:
case GT_IND:
case GT_OBJ:
case GT_STORE_OBJ:
{
ArrayInfo arrInfo;
if (!tree->AsIndir()->gtOp1->OperIs(GT_INDEX_ADDR) && TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
GetArrayInfoMap()->Set(copy, arrInfo);
}
}
break;
default:
break;
}
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->AsOp()->gtOp1 != nullptr)
{
copy->gtFlags |= (copy->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
}
if (copy->gtGetOp2IfPresent() != nullptr)
{
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
// We can't safely clone calls that have GT_RET_EXPRs via gtCloneExpr.
// You must use gtCloneCandidateCall for these calls (and then do appropriate other fixup)
if (tree->AsCall()->IsInlineCandidate() || tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
NO_WAY("Cloning of calls with associated GT_RET_EXPR nodes is not supported");
}
copy = gtCloneExprCallHelper(tree->AsCall(), addFlags, deepVarNum, deepVarVal);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
copy = new (this, GT_SIMD)
GenTreeSIMD(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsSIMD()),
tree->AsSIMD()->GetSIMDIntrinsicId(), tree->AsSIMD()->GetSimdBaseJitType(),
tree->AsSIMD()->GetSimdSize());
goto CLONE_MULTIOP_OPERANDS;
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
CLONE_MULTIOP_OPERANDS:
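            // The constructors above copied the operand pointers from the source node;
            // now replace each use edge with a recursively cloned operand.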
for (GenTree** use : copy->AsMultiOp()->UseEdges())
{
*use = gtCloneExpr(*use, addFlags, deepVarNum, deepVarVal);
}
break;
#endif
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
inds[dim] = gtCloneExpr(arrElem->gtArrInds[dim], addFlags, deepVarNum, deepVarVal);
}
copy = new (this, GT_ARR_ELEM)
GenTreeArrElem(arrElem->TypeGet(), gtCloneExpr(arrElem->gtArrObj, addFlags, deepVarNum, deepVarVal),
arrElem->gtArrRank, arrElem->gtArrElemSize, arrElem->gtArrElemType, &inds[0]);
}
break;
case GT_ARR_OFFSET:
{
copy = new (this, GT_ARR_OFFSET)
GenTreeArrOffs(tree->TypeGet(),
gtCloneExpr(tree->AsArrOffs()->gtOffset, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtIndex, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtArrObj, addFlags, deepVarNum, deepVarVal),
tree->AsArrOffs()->gtCurrDim, tree->AsArrOffs()->gtArrRank,
tree->AsArrOffs()->gtArrElemType);
}
break;
case GT_PHI:
{
copy = new (this, GT_PHI) GenTreePhi(tree->TypeGet());
            GenTreePhi::Use** prevUse = &copy->AsPhi()->gtUses;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
*prevUse = new (this, CMK_ASTNode)
GenTreePhi::Use(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal), *prevUse);
prevUse = &((*prevUse)->NextRef());
}
}
break;
case GT_FIELD_LIST:
copy = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
copy->AsFieldList()->AddField(this, gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal),
use.GetOffset(), use.GetType());
}
break;
case GT_CMPXCHG:
copy = new (this, GT_CMPXCHG)
GenTreeCmpXchg(tree->TypeGet(),
gtCloneExpr(tree->AsCmpXchg()->gtOpLocation, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpValue, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpComparand, addFlags, deepVarNum, deepVarVal));
break;
case GT_STORE_DYN_BLK:
copy = new (this, oper)
GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->Data(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize, addFlags, deepVarNum, deepVarVal));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
NO_WAY("unexpected operator");
}
DONE:
// If it has a zero-offset field seq, copy annotation.
if (tree->TypeGet() == TYP_BYREF)
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
{
fgAddFieldSeqForZeroOffset(copy, fldSeq);
}
}
    copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's Value number pair
    /* Compute the flags for the copied node. Note that we can do this only
       if we didn't gtFoldExpr(copy) */
if (copy->gtOper == oper)
{
addFlags |= tree->gtFlags;
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
copy->gtFlags |= addFlags;
// Update side effect flags since they may be different from the source side effect flags.
// For example, we may have replaced some locals with constants and made indirections non-throwing.
gtUpdateNodeSideEffects(copy);
}
/* GTF_COLON_COND should be propagated from 'tree' to 'copy' */
copy->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
#if defined(DEBUG)
// Non-node debug flags should be propagated from 'tree' to 'copy'
copy->gtDebugFlags |= (tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
/* Make sure to copy back fields that may have been initialized */
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
return copy;
}
//------------------------------------------------------------------------
// gtCloneExprCallHelper: clone a call tree
//
// Notes:
// Do not invoke this method directly, instead call either gtCloneExpr
// or gtCloneCandidateCall, as appropriate.
//
// Arguments:
// tree - the call to clone
// addFlags - GTF_* flags to add to the copied tree nodes
// deepVarNum - lclNum to replace uses of beyond the root, or BAD_VAR_NUM for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree,
GenTreeFlags addFlags,
unsigned deepVarNum,
int deepVarVal)
{
GenTreeCall* copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
if (tree->gtCallThisArg == nullptr)
{
copy->gtCallThisArg = nullptr;
}
else
{
copy->gtCallThisArg =
gtNewCallArgs(gtCloneExpr(tree->gtCallThisArg->GetNode(), addFlags, deepVarNum, deepVarVal));
}
copy->gtCallMoreFlags = tree->gtCallMoreFlags;
copy->gtCallArgs = nullptr;
copy->gtCallLateArgs = nullptr;
copy->gtRetBufArg = nullptr;
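    // Clone the (early) argument list, remembering which cloned use corresponds to the
    // return buffer argument, if any.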
    GenTreeCall::Use** argsTail = &copy->gtCallArgs;
for (GenTreeCall::Use& use : tree->Args())
{
GenTree* argNode = use.GetNode();
GenTree* copyArgNode = gtCloneExpr(argNode, addFlags, deepVarNum, deepVarVal);
*argsTail = gtNewCallArgs(copyArgNode);
if (tree->gtRetBufArg == &use)
{
// Set the return buffer arg, if any.
assert(copy->gtRetBufArg == nullptr);
copy->gtRetBufArg = *argsTail;
}
argsTail = &((*argsTail)->NextRef());
}
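    // Clone the late argument list in the same way, again watching for the return buffer use.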
    argsTail = &copy->gtCallLateArgs;
for (GenTreeCall::Use& use : tree->LateArgs())
{
GenTree* argNode = use.GetNode();
GenTree* copyArgNode = gtCloneExpr(argNode, addFlags, deepVarNum, deepVarVal);
*argsTail = gtNewCallArgs(copyArgNode);
if (tree->gtRetBufArg == &use)
{
// Set the return buffer arg, if any.
assert(copy->gtRetBufArg == nullptr);
copy->gtRetBufArg = *argsTail;
}
argsTail = &((*argsTail)->NextRef());
}
    // Either there was no return buffer for "tree", or else we successfully set the
    // return buffer in the copy.
assert((tree->gtRetBufArg == nullptr) || (copy->gtRetBufArg != nullptr));
// The call sig comes from the EE and doesn't change throughout the compilation process, meaning
// we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
// (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
// because the inlinee still uses the inliner's memory allocator anyway.)
INDEBUG(copy->callSig = tree->callSig;)
// The tail call info does not change after it is allocated, so for the same reasons as above
// a shallow copy suffices.
copy->tailCallInfo = tree->tailCallInfo;
copy->gtRetClsHnd = tree->gtRetClsHnd;
copy->gtControlExpr = gtCloneExpr(tree->gtControlExpr, addFlags, deepVarNum, deepVarVal);
copy->gtStubCallStubAddr = tree->gtStubCallStubAddr;
/* Copy the union */
if (tree->gtCallType == CT_INDIRECT)
{
copy->gtCallCookie =
tree->gtCallCookie ? gtCloneExpr(tree->gtCallCookie, addFlags, deepVarNum, deepVarVal) : nullptr;
copy->gtCallAddr = tree->gtCallAddr ? gtCloneExpr(tree->gtCallAddr, addFlags, deepVarNum, deepVarVal) : nullptr;
}
else
{
copy->gtCallMethHnd = tree->gtCallMethHnd;
copy->gtInlineCandidateInfo = tree->gtInlineCandidateInfo;
}
copy->gtCallType = tree->gtCallType;
copy->gtReturnType = tree->gtReturnType;
if (tree->fgArgInfo)
{
// Create and initialize the fgArgInfo for our copy of the call tree
copy->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
}
else
{
copy->fgArgInfo = nullptr;
}
#if FEATURE_MULTIREG_RET
copy->gtReturnTypeDesc = tree->gtReturnTypeDesc;
#endif
#ifdef FEATURE_READYTORUN
copy->setEntryPoint(tree->gtEntryPoint);
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
copy->gtInlineObservation = tree->gtInlineObservation;
copy->gtRawILOffset = tree->gtRawILOffset;
copy->gtInlineContext = tree->gtInlineContext;
#endif
copy->CopyOtherRegFlags(tree);
    // We keep track of the number of no-return calls, so if we've cloned
// one of these, update the tracking.
//
if (tree->IsNoReturn())
{
assert(copy->IsNoReturn());
setMethodHasNoReturnCalls();
}
return copy;
}
//------------------------------------------------------------------------
// gtCloneCandidateCall: clone a call that is an inline or guarded
// devirtualization candidate (~ any call that can have a GT_RET_EXPR)
//
// Notes:
// If the call really is a candidate, the caller must take additional steps
// after cloning to re-establish candidate info and the relationship between
// the candidate and any associated GT_RET_EXPR.
//
// Arguments:
// call - the call to clone
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneCandidateCall(GenTreeCall* call)
{
assert(call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate());
GenTreeCall* result = gtCloneExprCallHelper(call);
// There is some common post-processing in gtCloneExpr that we reproduce
// here, for the fields that make sense for candidate calls.
result->gtFlags |= call->gtFlags;
#if defined(DEBUG)
result->gtDebugFlags |= (call->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
result->CopyReg(call);
return result;
}
//------------------------------------------------------------------------
// gtUpdateSideEffects: Update the side effects of a tree and its ancestors
//
// Arguments:
// stmt - The tree's statement
// tree - Tree to update the side effects for
//
// Note: If tree's order hasn't been established, the method updates side effect
// flags on all statement's nodes.
void Compiler::gtUpdateSideEffects(Statement* stmt, GenTree* tree)
{
if (fgStmtListThreaded)
{
gtUpdateTreeAncestorsSideEffects(tree);
}
else
{
gtUpdateStmtSideEffects(stmt);
}
}
//------------------------------------------------------------------------
// gtUpdateTreeAncestorsSideEffects: Update the side effects of a tree and its ancestors
// when statement order has been established.
//
// Arguments:
// tree - Tree to update the side effects for
//
void Compiler::gtUpdateTreeAncestorsSideEffects(GenTree* tree)
{
assert(fgStmtListThreaded);
while (tree != nullptr)
{
gtUpdateNodeSideEffects(tree);
tree = tree->gtGetParent(nullptr);
}
}
//------------------------------------------------------------------------
// gtUpdateStmtSideEffects: Update the side effects for statement tree nodes.
//
// Arguments:
// stmt - The statement to update side effects on
//
void Compiler::gtUpdateStmtSideEffects(Statement* stmt)
{
fgWalkTree(stmt->GetRootNodePointer(), fgUpdateSideEffectsPre, fgUpdateSideEffectsPost);
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffects: Update the side effects based on the node operation.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
// The caller of this method is expected to update the flags based on the children's flags.
//
void Compiler::gtUpdateNodeOperSideEffects(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
tree->gtFlags &= ~GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
tree->gtFlags &= ~GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffectsPost: Update the side effects based on the node operation,
// in the post-order visit of a tree walk. It is expected that the pre-order visit cleared
// the bits, so the post-order visit only sets them. This is important for binary nodes
// where one child already may have set the GTF_EXCEPT bit. Note that `SetIndirExceptionFlags`
// looks at its child, which is why we need to do this in a bottom-up walk.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_ASG, GTF_CALL, and GTF_EXCEPT flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeOperSideEffectsPost(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeSideEffects: Update the side effects based on the node operation and
// children's side effects.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeSideEffects(GenTree* tree)
{
gtUpdateNodeOperSideEffects(tree);
tree->VisitOperands([tree](GenTree* operand) -> GenTree::VisitResult {
tree->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
return GenTree::VisitResult::Continue;
});
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPre: Update the side effects based on the tree operation.
// The pre-visit walk clears GTF_ASG, GTF_CALL, and GTF_EXCEPT; the post-visit walk sets
// the bits as necessary.
//
// Arguments:
// pTree - Pointer to the tree to update the side effects
// fgWalkPre - Walk data
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPre(GenTree** pTree, fgWalkData* fgWalkPre)
{
GenTree* tree = *pTree;
tree->gtFlags &= ~(GTF_ASG | GTF_CALL | GTF_EXCEPT);
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPost: Update the side effects of the node and parent based on the tree's flags.
//
// Arguments:
// pTree - Pointer to the tree
// fgWalkPost - Walk data
//
// Notes:
// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPost(GenTree** pTree, fgWalkData* fgWalkPost)
{
GenTree* tree = *pTree;
// Update the node's side effects first.
fgWalkPost->compiler->gtUpdateNodeOperSideEffectsPost(tree);
// If this node is an indir or array length, and it doesn't have the GTF_EXCEPT bit set, we
// set the GTF_IND_NONFAULTING bit. This needs to be done after all children, and this node, have
// been processed.
if (tree->OperIsIndirOrArrLength() && ((tree->gtFlags & GTF_EXCEPT) == 0))
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
// Then update the parent's side effects based on this node.
GenTree* parent = fgWalkPost->parent;
if (parent != nullptr)
{
parent->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
}
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// gtGetThisArg: Return this pointer node for the call.
//
// Arguments:
// call - the call node with a this argument.
//
// Return value:
// the this pointer node.
//
GenTree* Compiler::gtGetThisArg(GenTreeCall* call)
{
assert(call->gtCallThisArg != nullptr);
GenTree* thisArg = call->gtCallThisArg->GetNode();
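    // If the 'this' argument was not replaced by a setup assignment and has not been moved
    // to the late argument list, return it directly.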
if (!thisArg->OperIs(GT_ASG))
{
if ((thisArg->gtFlags & GTF_LATE_ARG) == 0)
{
return thisArg;
}
}
assert(call->gtCallLateArgs != nullptr);
unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
GenTree* result = thisArgTabEntry->GetNode();
// Assert if we used DEBUG_DESTROY_NODE.
assert(result->gtOper != GT_COUNT);
return result;
}
bool GenTree::gtSetFlags() const
{
//
    // When FEATURE_SET_FLAGS (TARGET_ARM) is active, the method returns true
    // when gtFlags has the GTF_SET_FLAGS flag set; otherwise the architecture
    // will have instructions that typically set the flags, and this method
    // will return true.
//
// Exceptions: GT_IND (load/store) is not allowed to set the flags
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags
//
// Precondition we have a GTK_SMPOP
//
if (!varTypeIsIntegralOrI(TypeGet()) && (TypeGet() != TYP_VOID))
{
return false;
}
if (((gtFlags & GTF_SET_FLAGS) != 0) && (gtOper != GT_IND))
{
        // GTF_SET_FLAGS is not valid on GT_IND and is overlaid with GTF_IND_NONFAULTING
return true;
}
else
{
return false;
}
}
bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a Nop unless FEATURE_SET_FLAGS is defined
// In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
    //    and we have an integer or machine-sized type (not floating point or TYP_LONG on 32-bit)
//
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
switch (gtOper)
{
case GT_IND:
case GT_ARR_LENGTH:
// These will turn into simple load from memory instructions
// and we can't force the setting of the flags on load from memory
break;
case GT_MUL:
case GT_DIV:
// These instructions don't set the flags (on x86/x64)
//
break;
default:
// Otherwise we can set the flags for this gtOper
// and codegen must set the condition flags.
//
gtFlags |= GTF_SET_FLAGS;
result = true;
break;
}
#endif // FEATURE_SET_FLAGS
// Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
: m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1)
{
}
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
: m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0)
{
assert(m_node != nullptr);
// NOTE: the switch statement below must be updated when introducing new nodes.
switch (m_node->OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
m_state = -1;
return;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
case GT_RETURNTRAP:
m_edge = &m_node->AsUnOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
return;
// Unary operators with an optional operand
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
if (m_node->AsUnOp()->gtOp1 == nullptr)
{
assert(m_node->NullOp1Legal());
m_state = -1;
}
else
{
m_edge = &m_node->AsUnOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
return;
// Variadic nodes
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
SetEntryStateForMultiOp();
return;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// LEA, which may have no first operand
case GT_LEA:
if (m_node->AsAddrMode()->gtOp1 == nullptr)
{
m_edge = &m_node->AsAddrMode()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else
{
SetEntryStateForBinOp();
}
return;
// Special nodes
case GT_FIELD_LIST:
m_statePtr = m_node->AsFieldList()->Uses().GetHead();
m_advance = &GenTreeUseEdgeIterator::AdvanceFieldList;
AdvanceFieldList();
return;
case GT_PHI:
m_statePtr = m_node->AsPhi()->gtUses;
m_advance = &GenTreeUseEdgeIterator::AdvancePhi;
AdvancePhi();
return;
case GT_CMPXCHG:
m_edge = &m_node->AsCmpXchg()->gtOpLocation;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceCmpXchg;
return;
case GT_ARR_ELEM:
m_edge = &m_node->AsArrElem()->gtArrObj;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
case GT_ARR_OFFSET:
m_edge = &m_node->AsArrOffs()->gtOffset;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrOffset;
return;
case GT_STORE_DYN_BLK:
m_edge = &m_node->AsStoreDynBlk()->Addr();
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
return;
case GT_CALL:
AdvanceCall<CALL_INSTANCE>();
return;
// Binary nodes
default:
assert(m_node->OperIsBinary());
SetEntryStateForBinOp();
return;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCmpXchg: produces the next operand of a CmpXchg node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceCmpXchg()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsCmpXchg()->gtOpValue;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsCmpXchg()->gtOpComparand;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrElem: produces the next operand of a ArrElem node and advances the state.
//
// Because these nodes are variadic, this function uses `m_state` to index into the list of array indices.
//
void GenTreeUseEdgeIterator::AdvanceArrElem()
{
if (m_state < m_node->AsArrElem()->gtArrRank)
{
m_edge = &m_node->AsArrElem()->gtArrInds[m_state];
assert(*m_edge != nullptr);
m_state++;
}
else
{
m_state = -1;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrOffset: produces the next operand of a ArrOffset node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceArrOffset()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsArrOffs()->gtIndex;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsArrOffs()->gtArrObj;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
{
GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
switch (m_state)
{
case 0:
m_edge = &dynBlock->Data();
m_state = 1;
break;
case 1:
m_edge = &dynBlock->gtDynamicSize;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreeFieldList::Use* currentUse = static_cast<GenTreeFieldList::Use*>(m_statePtr);
        m_edge     = &currentUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvancePhi: produces the next operand of a Phi node and advances the state.
//
void GenTreeUseEdgeIterator::AdvancePhi()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreePhi::Use* currentUse = static_cast<GenTreePhi::Use*>(m_statePtr);
        m_edge     = &currentUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceBinOp: produces the next operand of a binary node and advances the state.
//
// This function must be instantiated s.t. `ReverseOperands` is `true` iff the node is marked with the
// `GTF_REVERSE_OPS` flag.
//
template <bool ReverseOperands>
void GenTreeUseEdgeIterator::AdvanceBinOp()
{
assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0));
m_edge = !ReverseOperands ? &m_node->AsOp()->gtOp2 : &m_node->AsOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForBinOp: produces the first operand of a binary node and chooses
// the appropriate advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForBinOp()
{
assert(m_node != nullptr);
assert(m_node->OperIsBinary());
GenTreeOp* const node = m_node->AsOp();
if (node->gtOp2 == nullptr)
{
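        // The second operand is absent (legal only for certain operators), so iterate just op1.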
assert(node->gtOp1 != nullptr);
assert(node->NullOp2Legal());
m_edge = &node->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else if ((node->gtFlags & GTF_REVERSE_OPS) != 0)
{
m_edge = &m_node->AsOp()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<true>;
}
else
{
m_edge = &m_node->AsOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<false>;
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceMultiOp: produces the next operand of a multi-op node and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// incrementing the "m_edge" pointer, unless the end, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
m_edge++;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceReversedMultiOp: produces the next operand of a multi-op node
// marked with GTF_REVERSE_OPS and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// decrementing the "m_edge" pointer, unless the beginning, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceReversedMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
assert((m_node->AsMultiOp()->GetOperandCount() == 2) && m_node->IsReverseOp());
m_edge--;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForMultiOp: produces the first operand of a multi-op node and sets the
// required advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForMultiOp()
{
size_t operandCount = m_node->AsMultiOp()->GetOperandCount();
if (operandCount == 0)
{
Terminate();
}
else
{
if (m_node->IsReverseOp())
{
assert(operandCount == 2);
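            // Start at the last operand; the slot just before the operand array acts as the
            // terminating sentinel for the reversed walk.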
m_edge = m_node->AsMultiOp()->GetOperandArray() + 1;
m_statePtr = m_node->AsMultiOp()->GetOperandArray() - 1;
m_advance = &GenTreeUseEdgeIterator::AdvanceReversedMultiOp;
}
else
{
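            // Walk the operands in order; one past the end of the operand array acts as the sentinel.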
m_edge = m_node->AsMultiOp()->GetOperandArray();
m_statePtr = m_node->AsMultiOp()->GetOperandArray(operandCount);
m_advance = &GenTreeUseEdgeIterator::AdvanceMultiOp;
}
}
}
#endif
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCall: produces the next operand of a call node and advances the state.
//
// This function is a bit tricky: in order to avoid doing unnecessary work, it is instantiated with the
// state number the iterator will be in when it is called. For example, `AdvanceCall<CALL_INSTANCE>`
// is the instantiation used when the iterator is at the `CALL_INSTANCE` state (i.e. the entry state).
// This sort of templating allows each state to avoid processing earlier states without unnecessary
// duplication of code.
//
// Note that this method expands the argument lists (`gtCallArgs` and `gtCallLateArgs`) into their
// component operands.
//
template <int state>
void GenTreeUseEdgeIterator::AdvanceCall()
{
GenTreeCall* const call = m_node->AsCall();
switch (state)
{
case CALL_INSTANCE:
m_statePtr = call->gtCallArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
if (call->gtCallThisArg != nullptr)
{
m_edge = &call->gtCallThisArg->NodeRef();
return;
}
FALLTHROUGH;
case CALL_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_statePtr = call->gtCallLateArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_LATE_ARGS>;
FALLTHROUGH;
case CALL_LATE_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_CONTROL_EXPR>;
FALLTHROUGH;
case CALL_CONTROL_EXPR:
if (call->gtControlExpr != nullptr)
{
if (call->gtCallType == CT_INDIRECT)
{
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_COOKIE>;
}
else
{
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
m_edge = &call->gtControlExpr;
return;
}
else if (call->gtCallType != CT_INDIRECT)
{
m_state = -1;
return;
}
FALLTHROUGH;
case CALL_COOKIE:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ADDRESS>;
if (call->gtCallCookie != nullptr)
{
m_edge = &call->gtCallCookie;
return;
}
FALLTHROUGH;
case CALL_ADDRESS:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::Terminate;
if (call->gtCallAddr != nullptr)
{
m_edge = &call->gtCallAddr;
}
return;
default:
unreached();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::Terminate: advances the iterator to the terminal state.
//
void GenTreeUseEdgeIterator::Terminate()
{
m_state = -1;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::operator++: advances the iterator to the next operand.
//
GenTreeUseEdgeIterator& GenTreeUseEdgeIterator::operator++()
{
// If we've reached the terminal state, do nothing.
if (m_state != -1)
{
(this->*m_advance)();
}
return *this;
}
GenTreeUseEdgeIterator GenTree::UseEdgesBegin()
{
return GenTreeUseEdgeIterator(this);
}
GenTreeUseEdgeIterator GenTree::UseEdgesEnd()
{
return GenTreeUseEdgeIterator();
}
IteratorPair<GenTreeUseEdgeIterator> GenTree::UseEdges()
{
return MakeIteratorPair(UseEdgesBegin(), UseEdgesEnd());
}
GenTreeOperandIterator GenTree::OperandsBegin()
{
return GenTreeOperandIterator(this);
}
GenTreeOperandIterator GenTree::OperandsEnd()
{
return GenTreeOperandIterator();
}
IteratorPair<GenTreeOperandIterator> GenTree::Operands()
{
return MakeIteratorPair(OperandsBegin(), OperandsEnd());
}
bool GenTree::Precedes(GenTree* other)
{
assert(other != nullptr);
for (GenTree* node = gtNext; node != nullptr; node = node->gtNext)
{
if (node == other)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------------
// SetIndirExceptionFlags : Set GTF_EXCEPT and GTF_IND_NONFAULTING flags as appropriate
// on an indirection or an array length node.
//
// Arguments:
// comp - compiler instance
//
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
assert(OperIsIndirOrArrLength());
if (OperMayThrow(comp))
{
gtFlags |= GTF_EXCEPT;
return;
}
GenTree* addr = nullptr;
if (OperIsIndir())
{
addr = AsIndir()->Addr();
}
else
{
assert(gtOper == GT_ARR_LENGTH);
addr = AsArrLen()->ArrRef();
}
if ((addr->gtFlags & GTF_EXCEPT) != 0)
{
gtFlags |= GTF_EXCEPT;
}
else
{
gtFlags &= ~GTF_EXCEPT;
gtFlags |= GTF_IND_NONFAULTING;
}
}
#ifdef DEBUG
/* static */ int GenTree::gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags)
{
int charsDisplayed = 11; // 11 is the "baseline" number of flag characters displayed
printf("%c", (flags & GTF_ASG) ? 'A' : (IsContained(flags) ? 'c' : '-'));
printf("%c", (flags & GTF_CALL) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
(flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
++charsDisplayed;
#endif
printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return charsDisplayed;
}
#ifdef TARGET_X86
inline const char* GetCallConvName(CorInfoCallConvExtension callConv)
{
switch (callConv)
{
case CorInfoCallConvExtension::Managed:
return "Managed";
case CorInfoCallConvExtension::C:
return "C";
case CorInfoCallConvExtension::Stdcall:
return "Stdcall";
case CorInfoCallConvExtension::Thiscall:
return "Thiscall";
case CorInfoCallConvExtension::Fastcall:
return "Fastcall";
case CorInfoCallConvExtension::CMemberFunction:
return "CMemberFunction";
case CorInfoCallConvExtension::StdcallMemberFunction:
return "StdcallMemberFunction";
case CorInfoCallConvExtension::FastcallMemberFunction:
return "FastcallMemberFunction";
default:
return "UnknownCallConv";
}
}
#endif // TARGET_X86
/*****************************************************************************/
void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
{
name = GenTree::OpName(tree->OperGet());
}
else
{
name = "<ERROR>";
}
char buf[32];
char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
sprintf_s(bufp, sizeof(buf), " %s(h)%c", name, 0);
}
else if (tree->gtOper == GT_PUTARG_STK)
{
sprintf_s(bufp, sizeof(buf), " %s [+0x%02x]%c", name, tree->AsPutArgStk()->getArgOffset(), 0);
}
else if (tree->gtOper == GT_CALL)
{
const char* callType = "CALL";
const char* gtfType = "";
const char* ctType = "";
char gtfTypeBuf[100];
if (tree->AsCall()->gtCallType == CT_USER_FUNC)
{
if (tree->AsCall()->IsVirtual())
{
callType = "CALLV";
}
}
else if (tree->AsCall()->gtCallType == CT_HELPER)
{
ctType = " help";
}
else if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
ctType = " ind";
}
else
{
assert(!"Unknown gtCallType");
}
if (tree->gtFlags & GTF_CALL_NULLCHECK)
{
gtfType = " nullcheck";
}
if (tree->AsCall()->IsVirtualVtable())
{
gtfType = " vt-ind";
}
else if (tree->AsCall()->IsVirtualStub())
{
gtfType = " stub";
}
#ifdef FEATURE_READYTORUN
else if (tree->AsCall()->IsR2RRelativeIndir())
{
gtfType = " r2r_ind";
}
#endif // FEATURE_READYTORUN
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
char* gtfTypeBufWalk = gtfTypeBuf;
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
#ifdef TARGET_X86
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " %s",
GetCallConvName(tree->AsCall()->GetUnmanagedCallConv()));
#endif // TARGET_X86
gtfType = gtfTypeBuf;
}
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
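        // Print rank-1 commas so a rank-N element access displays as "name[,,...]".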
for (unsigned rank = tree->AsArrElem()->gtArrRank - 1; rank; rank--)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
unsigned char currDim;
unsigned char rank;
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->AsArrOffs()->gtCurrDim;
rank = tree->AsArrOffs()->gtArrRank;
}
else
{
currDim = tree->AsArrIndex()->gtCurrDim;
rank = tree->AsArrIndex()->gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
{
            // Use a de facto standard i,j,k for the dimensions.
// Note that we only support up to rank 3 arrays with these nodes, so we won't run out of characters.
char dimChar = '*';
if (dim == currDim)
{
dimChar = 'i' + dim;
}
else if (dim > currDim)
{
dimChar = ' ';
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_LEA)
{
GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
if (lea->Base() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
}
if (lea->Index() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->Offset());
}
else if (tree->gtOper == GT_BOUNDS_CHECK)
{
switch (tree->AsBoundsChk()->gtThrowKind)
{
case SCK_RNGCHK_FAIL:
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s_Rng", name);
if (tree->AsBoundsChk()->gtIndRngFailBB != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " -> " FMT_BB,
tree->AsBoundsChk()->gtIndRngFailBB->bbNum);
}
break;
}
case SCK_ARG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
break;
case SCK_ARG_RNG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
break;
default:
unreached();
}
}
else if (tree->gtOverflowEx())
{
sprintf_s(bufp, sizeof(buf), " %s_ovfl%c", name, 0);
}
else
{
sprintf_s(bufp, sizeof(buf), " %s%c", name, 0);
}
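    // Pad short node names so the columns that follow line up.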
if (strlen(buf) < 10)
{
printf(" %-10s", buf);
}
else
{
printf(" %s", buf);
}
}
//------------------------------------------------------------------------
// gtDispZeroFieldSeq: If this node has a zero fieldSeq annotation
// then print this Field Sequence
//
void Compiler::gtDispZeroFieldSeq(GenTree* tree)
{
NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
    // The most common case is having no entries in this map
if (map->GetCount() > 0)
{
FieldSeqNode* fldSeq = nullptr;
if (map->Lookup(tree, &fldSeq))
{
printf(" Zero");
gtDispAnyFieldSeq(fldSeq);
}
}
}
//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
printf(" ");
vnpPrint(tree->gtVNPair, 0);
}
}
//------------------------------------------------------------------------
// gtDispCommonEndLine
// Utility function that prints the following node information
//     1. The associated zero field sequence (if any)
//     2. The register assigned to this node (if any)
//     3. The value number assigned (if any)
//     4. A newline character
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
}
//------------------------------------------------------------------------
// gtDispNode: Print a tree to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// msg - a contextual method (i.e. from the parent) to print
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_z_ const char* msg, bool isLIR)
{
bool printFlags = true; // always true..
int msgLength = 25;
GenTree* prev;
if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
printf("(???"
",???"
") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
{
prev = tree;
bool hasSeqNum = true;
unsigned dotNum = 0;
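        // This node has no sequence number: walk back to the nearest node that does,
        // counting how far back we went so the node can be shown as N<seq>.<dotNum>.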
do
{
dotNum++;
prev = prev->gtPrev;
if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
}
assert(prev);
} while (prev->gtSeqNum == 0);
// If we have an indent stack, don't add additional characters,
// as it will mess up the alignment.
bool displayDotNum = hasSeqNum && (indentStack == nullptr);
if (displayDotNum)
{
printf("N%03u.%02u ", prev->gtSeqNum, dotNum);
}
else
{
printf(" ");
}
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
if (displayDotNum)
{
// Do better alignment in this case
printf(" ");
}
else
{
printf(" ");
}
}
}
if (optValnumCSE_phase)
{
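        // During the CSE phase, show the CSE candidate index and whether this node is a def or a use.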
if (IS_CSE_INDEX(tree->gtCSEnum))
{
printf(FMT_CSE " (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
printf(" ");
}
}
/* Print the node ID */
printTreeID(tree);
printf(" ");
if (tree->gtOper >= GT_COUNT)
{
printf(" **** ILLEGAL NODE ****");
return;
}
if (printFlags)
{
/* First print the flags associated with the node */
switch (tree->gtOper)
{
case GT_LEA:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_IND:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
printf("*");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP)
{
printf("s");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_INVARIANT)
{
printf("#");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ARR_INDEX)
{
printf("a");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
printf("n"); // print a n for non-faulting
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ASG_LHS)
{
printf("D"); // print a D for definition
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONNULL)
{
printf("@");
--msgLength;
break;
}
}
FALLTHROUGH;
case GT_INDEX:
case GT_INDEX_ADDR:
case GT_FIELD:
case GT_CLS_VAR:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
printf("V");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_UNALIGNED)
{
printf("U");
--msgLength;
break;
}
goto DASH;
case GT_ASG:
if (tree->OperIsInitBlkOp())
{
printf("I");
--msgLength;
break;
}
goto DASH;
case GT_CALL:
if (tree->AsCall()->IsInlineCandidate())
{
if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("&");
}
else
{
printf("I");
}
--msgLength;
break;
}
else if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("G");
--msgLength;
break;
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
{
printf("S");
--msgLength;
break;
}
if (tree->gtFlags & GTF_CALL_HOISTABLE)
{
printf("H");
--msgLength;
break;
}
goto DASH;
case GT_MUL:
#if !defined(TARGET_64BIT)
case GT_MUL_LONG:
#endif
if (tree->gtFlags & GTF_MUL_64RSLT)
{
printf("L");
--msgLength;
break;
}
goto DASH;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (tree->gtFlags & GTF_DIV_BY_CNS_OPT)
{
printf("M"); // We will use a Multiply by reciprical
--msgLength;
break;
}
goto DASH;
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (tree->gtFlags & GTF_VAR_USEASG)
{
printf("U");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_MULTIREG)
{
printf((tree->gtFlags & GTF_VAR_DEF) ? "M" : "m");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_DEF)
{
printf("D");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CAST)
{
printf("C");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
printf("i");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CONTEXT)
{
printf("!");
--msgLength;
break;
}
goto DASH;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
printf("N");
--msgLength;
break;
}
if (tree->gtFlags & GTF_RELOP_JMP_USED)
{
printf("J");
--msgLength;
break;
}
goto DASH;
case GT_JCMP:
printf((tree->gtFlags & GTF_JCMP_TST) ? "T" : "C");
printf((tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
goto DASH;
case GT_CNS_INT:
if (tree->IsIconHandle())
{
if ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)
{
printf("I"); // Static Field handle with INITCLASS requirement
--msgLength;
break;
}
else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf("O");
--msgLength;
break;
}
else
{
// Some other handle
printf("H");
--msgLength;
break;
}
}
goto DASH;
default:
DASH:
printf("-");
--msgLength;
break;
}
/* Then print the general purpose flags */
GenTreeFlags flags = tree->gtFlags;
if (tree->OperIsBinary() || tree->OperIsMultiOp())
{
genTreeOps oper = tree->OperGet();
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul/shl Binary Operators
if ((oper == GT_ADD) || (oper == GT_MUL) || (oper == GT_LSH))
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
else // !(tree->OperIsBinary() || tree->OperIsMultiOp())
{
            // The GTF_REVERSE_OPS flag only applies to binary operations (which some MultiOp nodes are).
flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
/*
printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
(flags & GTF_BOOLEAN ) ? 'B' : '-');
printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
*/
}
// If we're printing a node for LIR, we use the space normally associated with the message
// to display the node's temp name (if any)
const bool hasOperands = tree->OperandsBegin() != tree->OperandsEnd();
if (isLIR)
{
assert(msg == nullptr);
// If the tree does not have any operands, we do not display the indent stack. This gives us
// two additional characters for alignment.
if (!hasOperands)
{
msgLength += 1;
}
if (tree->IsValue())
{
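            // Build a "tN = " prefix in a temporary buffer so value-producing LIR nodes
            // display the temp number they define.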
const size_t bufLength = msgLength - 1;
msg = reinterpret_cast<char*>(_alloca(bufLength * sizeof(char)));
sprintf_s(const_cast<char*>(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " ");
}
}
/* print the msg associated with the node */
if (msg == nullptr)
{
msg = "";
}
if (msgLength < 0)
{
msgLength = 0;
}
printf(isLIR ? " %+*s" : " %-*s", msgLength, msg);
/* Indent the node accordingly */
if (!isLIR || hasOperands)
{
printIndent(indentStack);
}
gtDispNodeName(tree);
assert(tree == nullptr || tree->gtOper < GT_COUNT);
if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
{
printf(" %-6s", varTypeName(tree->TypeGet()));
if (varTypeIsStruct(tree->TypeGet()))
{
ClassLayout* layout = nullptr;
if (tree->OperIs(GT_BLK, GT_OBJ, GT_STORE_BLK, GT_STORE_OBJ))
{
layout = tree->AsBlk()->GetLayout();
}
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varTypeIsStruct(varDsc->TypeGet()))
{
layout = varDsc->GetLayout();
}
}
else if (tree->OperIs(GT_INDEX))
{
GenTreeIndex* asInd = tree->AsIndex();
CORINFO_CLASS_HANDLE clsHnd = asInd->gtStructElemClass;
if (clsHnd != nullptr)
{
// We could create a layout with `typGetObjLayout(asInd->gtStructElemClass)` but we
// don't want to affect the layout table.
const unsigned classSize = info.compCompHnd->getClassSize(clsHnd);
const char16_t* shortClassName = eeGetShortClassName(clsHnd);
printf("<%S, %u>", shortClassName, classSize);
}
}
else if (tree->OperIsIndir())
{
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
if (varTypeIsStruct(arrInfo.m_elemType))
{
CORINFO_CLASS_HANDLE clsHnd = arrInfo.m_elemStructType;
                            // We could create a layout with `typGetObjLayout(clsHnd)` but we
                            // don't want to affect the layout table.
const unsigned classSize = info.compCompHnd->getClassSize(clsHnd);
const char16_t* shortClassName = eeGetShortClassName(clsHnd);
printf("<%S, %u>", shortClassName, classSize);
}
}
}
if (layout != nullptr)
{
gtDispClassLayout(layout, tree->TypeGet());
}
}
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->IsAddressExposed())
{
printf("(AX)"); // Variable has address exposed.
}
if (varDsc->IsHiddenBufferStructArg())
{
printf("(RB)"); // Variable is hidden return buffer
}
if (varDsc->lvUnusedStruct)
{
assert(varDsc->lvPromoted);
printf("(U)"); // Unused struct
}
else if (varDsc->lvPromoted)
{
if (varTypeIsPromotable(varDsc))
{
printf("(P)"); // Promoted struct
}
else
{
// Promoted implicit by-refs can have this state during
// global morph while they are being rewritten
printf("(P?!)"); // Promoted struct
}
}
}
if (tree->IsArgPlaceHolderNode() && (tree->AsArgPlace()->gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->AsArgPlace()->gtArgPlaceClsHnd));
}
if (tree->gtOper == GT_RUNTIMELOOKUP)
{
#ifdef TARGET_64BIT
printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#else
printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#endif
switch (tree->AsRuntimeLookup()->gtHndType)
{
case CORINFO_HANDLETYPE_CLASS:
printf(" class");
break;
case CORINFO_HANDLETYPE_METHOD:
printf(" method");
break;
case CORINFO_HANDLETYPE_FIELD:
printf(" field");
break;
default:
printf(" unknown");
break;
}
}
}
// for tracking down problems in reguse prediction or liveness tracking
if (verbose && 0)
{
printf(" RR=");
dspRegMask(tree->gtRsvdRegs);
printf("\n");
}
}
}
#if FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispMultiRegCount: determine how many registers to print for a multi-reg node
//
// Arguments:
// tree - GenTree node whose registers we want to print
//
// Return Value:
// The number of registers to print
//
// Notes:
// This is not the same in all cases as GenTree::GetMultiRegCount().
// In particular, for COPY or RELOAD it only returns the number of *valid* registers,
// and for CALL, it will return 0 if the ReturnTypeDesc hasn't yet been initialized.
// But we want to print all register positions.
//
unsigned Compiler::gtDispMultiRegCount(GenTree* tree)
{
if (tree->IsCopyOrReload())
{
// GetRegCount() will return only the number of valid regs for COPY or RELOAD,
// but we want to print all positions, so we get the reg count for op1.
return gtDispMultiRegCount(tree->gtGetOp1());
}
else if (!tree->IsMultiRegNode())
{
// We can wind up here because IsMultiRegNode() always returns true for COPY or RELOAD,
// even if its op1 is not multireg.
// Note that this method won't be called for non-register-producing nodes.
return 1;
}
else if (tree->OperIs(GT_CALL))
{
unsigned regCount = tree->AsCall()->GetReturnTypeDesc()->TryGetReturnRegCount();
// If it hasn't yet been initialized, we'd still like to see the registers printed.
if (regCount == 0)
{
regCount = MAX_RET_REG_COUNT;
}
return regCount;
}
else
{
return tree->GetMultiRegCount(this);
}
}
#endif // FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegVal: Print the register(s) defined by the given node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
// Don't display anything for the GT_REGTAG_NONE case;
// the absence of printed register values will imply this state.
case GenTree::GT_REGTAG_REG:
printf(" REG %s", compRegVarName(tree->GetRegNum()));
break;
default:
return;
}
#if FEATURE_MULTIREG_RET
if (tree->IsMultiRegNode())
{
// 0th reg is GetRegNum(), which is already printed above.
// Print the remaining regs of a multi-reg node.
unsigned regCount = gtDispMultiRegCount(tree);
// For some nodes, e.g. COPY, RELOAD or CALL, we may not have valid regs for all positions.
for (unsigned i = 1; i < regCount; ++i)
{
regNumber reg = tree->GetRegByIndex(i);
printf(",%s", genIsValidReg(reg) ? compRegVarName(reg) : "NA");
}
}
#endif
}
// We usually don't expect to print anything longer than this string.
#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = compMap2ILvarNum(lclNum);
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
ilName = "RetBuf";
}
else if (ilNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM)
{
ilName = "VarArgHandle";
}
else if (ilNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
ilName = "TypeCtx";
}
else if (ilNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM)
{
if (lclNumIsTrueCSE(lclNum))
{
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
else if (lclNum >= optCSEstart)
{
            // Currently, any new LclVars introduced after the CSE phase
            // are believed to be created by the "rationalizer"; that is what is meant by the "rat" prefix.
ilKind = "rat";
ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
{
if (lclNum == info.compLvFrameListRoot)
{
ilName = "FramesRoot";
}
else if (lclNum == lvaInlinedPInvokeFrameVar)
{
ilName = "PInvokeFrame";
}
else if (lclNum == lvaGSSecurityCookie)
{
ilName = "GsCookie";
}
else if (lclNum == lvaRetAddrVar)
{
ilName = "ReturnAddress";
}
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
{
ilName = "PInvokeFrameRegSave";
}
else if (lclNum == lvaOutgoingArgSpaceVar)
{
ilName = "OutArgs";
}
#endif // FEATURE_FIXED_OUT_ARGS
#if !defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaShadowSPslotsVar)
{
ilName = "EHSlots";
}
#endif // !FEATURE_EH_FUNCLETS
#ifdef JIT32_GCENCODER
else if (lclNum == lvaLocAllocSPvar)
{
ilName = "LocAllocSP";
}
#endif // JIT32_GCENCODER
#if defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaPSPSym)
{
ilName = "PSPSym";
}
#endif // FEATURE_EH_FUNCLETS
else
{
ilKind = "tmp";
if (compIsForInlining())
{
ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
ilNum = lclNum - info.compLocalsCount;
}
}
}
}
else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
{
ilName = "this";
}
else
{
ilKind = "arg";
}
}
else
{
if (!lvaTable[lclNum].lvIsStructField)
{
ilKind = "loc";
}
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
}
else
{
ilNum -= info.compILargsCount;
}
}
*ilKindOut = ilKind;
*ilNameOut = ilName;
*ilNumOut = ilNum;
}
/*****************************************************************************/
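// gtGetLclVarName: write a short display name for the local into 'buf', e.g. (illustrative)
// "V07 arg2" or "V12 PInvokeFrame", and return the number of characters printed (negative on failure).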
int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
char* bufp_next = buf;
unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = 0;
gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
assert(charsPrinted > 0);
assert(buf_remaining > 0);
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
char* Compiler::gtGetLclVarName(unsigned lclNum)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return nullptr;
}
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
return retBuf;
}
/*****************************************************************************/
void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return;
}
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < (int)LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
{
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
}
}
//------------------------------------------------------------------------
// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
//
// Arguments:
// lclNum - The local var id.
//
void Compiler::gtDispLclVarStructType(unsigned lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (type == TYP_STRUCT)
{
ClassLayout* layout = varDsc->GetLayout();
assert(layout != nullptr);
gtDispClassLayout(layout, type);
}
else if (type == TYP_LCLBLK)
{
#if FEATURE_FIXED_OUT_ARGS
assert(lclNum == lvaOutgoingArgSpaceVar);
// Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for dumping until
// after it has been set to something.
if (lvaOutgoingArgSpaceSize.HasFinalValue())
{
// A PhasedVar<T> can't be directly used as an arg to a variadic function
unsigned value = lvaOutgoingArgSpaceSize;
printf("<%u> ", value);
}
else
{
printf("<na> "); // The value hasn't yet been determined
}
#else
assert(!"Unknown size");
NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS
}
}
//------------------------------------------------------------------------
// gtDispClassLayout: Print size and type information about a layout.
//
// Arguments:
// layout - the layout;
// type - variable type, used to avoid printing size for SIMD nodes.
//
void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
{
assert(layout != nullptr);
if (layout->IsBlockLayout())
{
printf("<%u>", layout->GetSize());
}
else if (varTypeIsSIMD(type))
{
printf("<%S>", layout->GetShortClassName());
}
else
{
printf("<%S, %u>", layout->GetShortClassName(), layout->GetSize());
}
}
/*****************************************************************************/
void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperIsConst());
switch (tree->gtOper)
{
case GT_CNS_INT:
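// Small values (strictly between -1000 and 1000) are printed in decimal; larger values in hex.
// Handle constants are additionally tagged with their handle kind below.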
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
const WCHAR* str = eeGetCPString(tree->AsIntCon()->gtIconVal);
// If *str points to a '\0' then don't print the string's values
if ((str != nullptr) && (*str != '\0'))
{
printf(" 0x%X \"%S\"", dspPtr(tree->AsIntCon()->gtIconVal), str);
}
else // We can't print the value of the string
{
// Note that eeGetCPString isn't currently implemented on Linux/ARM
// and instead always returns nullptr
printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
}
}
else
{
ssize_t dspIconVal =
tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
if (tree->TypeGet() == TYP_REF)
{
assert(tree->AsIntCon()->gtIconVal == 0);
printf(" null");
}
else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
printf(" 0x%llx", dspIconVal);
}
else
{
printf(" -0x%llx", -dspIconVal);
}
}
#endif
else
{
if (dspIconVal >= 0)
{
printf(" 0x%X", dspIconVal);
}
else
{
printf(" -0x%X", -dspIconVal);
}
}
if (tree->IsIconHandle())
{
switch (tree->GetIconHandleFlag())
{
case GTF_ICON_SCOPE_HDL:
printf(" scope");
break;
case GTF_ICON_CLASS_HDL:
printf(" class");
break;
case GTF_ICON_METHOD_HDL:
printf(" method");
break;
case GTF_ICON_FIELD_HDL:
printf(" field");
break;
case GTF_ICON_STATIC_HDL:
printf(" static");
break;
case GTF_ICON_STR_HDL:
unreached(); // This case is handled above
break;
case GTF_ICON_CONST_PTR:
printf(" const ptr");
break;
case GTF_ICON_GLOBAL_PTR:
printf(" global ptr");
break;
case GTF_ICON_VARG_HDL:
printf(" vararg");
break;
case GTF_ICON_PINVKI_HDL:
printf(" pinvoke");
break;
case GTF_ICON_TOKEN_HDL:
printf(" token");
break;
case GTF_ICON_TLS_HDL:
printf(" tls");
break;
case GTF_ICON_FTN_ADDR:
printf(" ftn");
break;
case GTF_ICON_CIDMID_HDL:
printf(" cid/mid");
break;
case GTF_ICON_BBC_PTR:
printf(" bbc");
break;
case GTF_ICON_STATIC_BOX_PTR:
printf(" static box ptr");
break;
default:
printf(" UNKNOWN");
break;
}
}
if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf(" field offset");
}
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
printf(" vector element count");
}
#endif
if (tree->IsReuseRegVal())
{
printf(" reuse reg val");
}
}
gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
break;
case GT_CNS_LNG:
printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
break;
case GT_CNS_DBL:
if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000))
{
printf(" -0.00000");
}
else
{
printf(" %#.17g", tree->AsDblCon()->gtDconVal);
}
break;
case GT_CNS_STR:
printf("<string constant>");
break;
default:
assert(!"unexpected constant node");
}
}
//------------------------------------------------------------------------
// gtDispAnyFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
//
// Useful for printing zero-offset field sequences.
//
void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
{
if (fieldSeq == FieldSeqStore::NotAField())
{
printf(" Fseq<NotAField>");
return;
}
gtDispFieldSeq(fieldSeq);
}
//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
{
if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
{
return;
}
// Otherwise...
printf(" Fseq[");
while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->GetFieldHandleValue();
// First check the "pseudo" field handles...
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
{
printf("#FirstElem");
}
else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
{
printf("#ConstantIndex");
}
else
{
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->GetNext();
if (pfsn != nullptr)
{
printf(", ");
}
}
printf("]");
}
//------------------------------------------------------------------------
// gtDispLeaf: Print a single leaf node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperIsConst())
{
gtDispConst(tree);
return;
}
bool isLclFld = false;
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
isLclFld = true;
FALLTHROUGH;
case GT_PHI_ARG:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_STORE_LCL_VAR:
{
printf(" ");
const unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(varNum);
gtDispLclVar(varNum);
if (tree->AsLclVarCommon()->HasSsaName())
{
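// A partial def (GTF_VAR_USEASG) both reads and writes the local, so print the use and def
// SSA numbers as "ud:use->def".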
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
printf("ud:%d->%d", tree->AsLclVarCommon()->GetSsaNum(), GetSsaNumForLocalVarDef(tree));
}
else
{
printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->AsLclVarCommon()->GetSsaNum());
}
}
if (isLclFld)
{
printf("[+%u]", tree->AsLclFld()->GetLclOffs());
gtDispFieldSeq(tree->AsLclFld()->GetFieldSeq());
}
if (varDsc->lvRegister)
{
printf(" ");
varDsc->PrintVarReg();
}
else if (tree->InReg())
{
printf(" %s", compRegVarName(tree->GetRegNum()));
}
if (varDsc->lvPromoted)
{
if (!varTypeIsPromotable(varDsc) && !varDsc->lvUnusedStruct)
{
// Promoted implicit byrefs can get in this state while they are being rewritten
// in global morph.
}
else
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(i);
const char* fieldName;
#if !defined(TARGET_64BIT)
if (varTypeIsLong(varDsc))
{
fieldName = (i == 0) ? "lo" : "hi";
}
else
#endif // !defined(TARGET_64BIT)
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
CORINFO_FIELD_HANDLE fldHnd =
info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
fieldName = eeGetFieldName(fldHnd);
}
printf("\n");
printf(" ");
printIndent(indentStack);
printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
tree->AsLclVarCommon()->GetLclNum(), fieldName, fieldVarDsc->lvFldOffset);
gtDispLclVar(i);
if (fieldVarDsc->lvRegister)
{
printf(" ");
fieldVarDsc->PrintVarReg();
}
if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && tree->IsMultiRegLclVar() &&
tree->AsLclVar()->IsLastUse(i - varDsc->lvFieldLclStart))
{
printf(" (last use)");
}
}
}
}
else // a normal not-promoted lclvar
{
if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
}
break;
case GT_JMP:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsVal()->gtVal1, &className);
printf(" %s.%s\n", className, methodName);
}
break;
case GT_CLS_VAR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
gtDispFieldSeq(tree->AsClsVar()->gtFieldSeq);
break;
case GT_CLS_VAR_ADDR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
break;
case GT_LABEL:
break;
case GT_FTN_ADDR:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsFptrVal()->gtFptrMethod, &className);
printf(" %s.%s\n", className, methodName);
}
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
printf(" endNstLvl=%d", tree->AsVal()->gtVal1);
break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available. So do nothing
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
case GT_CATCH_ARG:
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
break;
case GT_RET_EXPR:
{
GenTree* const associatedTree = tree->AsRetExpr()->gtInlineCandidate;
printf("(inl return %s ", tree->IsCall() ? " from call" : "expr");
printTreeID(associatedTree);
printf(")");
}
break;
case GT_PHYSREG:
printf(" %s", getRegName(tree->AsPhysReg()->gtSrcReg));
break;
case GT_IL_OFFSET:
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;
case GT_JCC:
case GT_SETCC:
printf(" cond=%s", tree->AsCC()->gtCondition.Name());
break;
case GT_JCMP:
printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "",
(tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
break;
default:
assert(!"don't know how to display tree leaf node");
}
}
//------------------------------------------------------------------------
// gtDispChild: Print a child node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// arcType - the type of arc to use for this child
// msg - a contextual message (i.e. from the parent) to print
// topOnly - a boolean indicating whether to print the children, or just the top node
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
void Compiler::gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg, /* = nullptr */
bool topOnly) /* = false */
{
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
}
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
extern const char* const simdIntrinsicNames[] = {
#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
#include "simdintrinsiclist.h"
};
#endif // FEATURE_SIMD
/*****************************************************************************/
void Compiler::gtDispTree(GenTree* tree,
IndentStack* indentStack, /* = nullptr */
_In_ _In_opt_z_ const char* msg, /* = nullptr */
bool topOnly, /* = false */
bool isLIR) /* = false */
{
if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
printf(""); // null string means flush
return;
}
if (indentStack == nullptr)
{
indentStack = new (this, CMK_DebugOnly) IndentStack(this);
}
if (IsUninitialized(tree))
{
/* Value used to initialize nodes */
printf("Uninitialized tree node!\n");
return;
}
if (tree->gtOper >= GT_COUNT)
{
gtDispNode(tree, indentStack, msg, isLIR);
printf("Bogus operator!\n");
return;
}
/* Is tree a leaf node? */
if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg, isLIR);
gtDispLeaf(tree, indentStack);
gtDispCommonEndLine(tree);
if (tree->OperIsLocalStore() && !topOnly)
{
gtDispChild(tree->AsOp()->gtOp1, indentStack, IINone);
}
return;
}
// Determine what kind of arc to propagate.
IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
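// 'myArc' is the arc our parent pushed for this node; 'lowerArc' is what this node's children inherit.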
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
switch (myArc)
{
case IIArcBottom:
indentStack->Push(IIArc);
lowerArc = IINone;
break;
case IIArc:
indentStack->Push(IIArc);
lowerArc = IIArc;
break;
case IIArcTop:
indentStack->Push(IINone);
lowerArc = IIArc;
break;
case IINone:
indentStack->Push(IINone);
lowerArc = IINone;
break;
default:
unreached();
break;
}
}
/* Is it a 'simple' unary/binary operator? */
const char* childMsg = nullptr;
if (tree->OperIsSimple())
{
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
var_types fromType = genActualType(tree->AsCast()->CastOp()->TypeGet());
var_types toType = tree->CastToType();
var_types finalType = tree->TypeGet();
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
{
fromType = varTypeToUnsigned(fromType);
}
if (finalType != toType)
{
printf(" %s <-", varTypeName(finalType));
}
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
if (tree->OperIsBlkOp())
{
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
if (tree->OperIsStoreBlk() && (tree->AsBlk()->gtBlkOpKind != GenTreeBlk::BlkOpKindInvalid))
{
switch (tree->AsBlk()->gtBlkOpKind)
{
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
printf(" (RepInstr)");
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
printf(" (Unroll)");
break;
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
printf(" (Helper)");
break;
#endif
default:
unreached();
}
}
}
#if FEATURE_PUT_STRUCT_ARG_STK
else if (tree->OperGet() == GT_PUTARG_STK)
{
const GenTreePutArgStk* putArg = tree->AsPutArgStk();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots,
putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset());
}
#endif
if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid)
{
switch (putArg->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
printf(" (RepInstr)");
break;
case GenTreePutArgStk::Kind::PartialRepInstr:
printf(" (PartialRepInstr)");
break;
case GenTreePutArgStk::Kind::Unroll:
printf(" (Unroll)");
break;
case GenTreePutArgStk::Kind::Push:
printf(" (Push)");
break;
case GenTreePutArgStk::Kind::PushAllSlots:
printf(" (PushAllSlots)");
break;
default:
unreached();
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperGet() == GT_PUTARG_SPLIT)
{
const GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(),
putArg->gtNumRegs);
}
#endif
}
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
if (tree->OperIs(GT_FIELD))
{
if (FieldSeqStore::IsPseudoField(tree->AsField()->gtFldHnd))
{
printf(" #PseudoField:0x%x", tree->AsField()->gtFldOffset);
}
else
{
printf(" %s", eeGetFieldName(tree->AsField()->gtFldHnd), 0);
}
}
if (tree->gtOper == GT_INTRINSIC)
{
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
switch (intrinsic->gtIntrinsicName)
{
case NI_System_Math_Abs:
printf(" abs");
break;
case NI_System_Math_Acos:
printf(" acos");
break;
case NI_System_Math_Acosh:
printf(" acosh");
break;
case NI_System_Math_Asin:
printf(" asin");
break;
case NI_System_Math_Asinh:
printf(" asinh");
break;
case NI_System_Math_Atan:
printf(" atan");
break;
case NI_System_Math_Atanh:
printf(" atanh");
break;
case NI_System_Math_Atan2:
printf(" atan2");
break;
case NI_System_Math_Cbrt:
printf(" cbrt");
break;
case NI_System_Math_Ceiling:
printf(" ceiling");
break;
case NI_System_Math_Cos:
printf(" cos");
break;
case NI_System_Math_Cosh:
printf(" cosh");
break;
case NI_System_Math_Exp:
printf(" exp");
break;
case NI_System_Math_Floor:
printf(" floor");
break;
case NI_System_Math_FMod:
printf(" fmod");
break;
case NI_System_Math_FusedMultiplyAdd:
printf(" fma");
break;
case NI_System_Math_ILogB:
printf(" ilogb");
break;
case NI_System_Math_Log:
printf(" log");
break;
case NI_System_Math_Log2:
printf(" log2");
break;
case NI_System_Math_Log10:
printf(" log10");
break;
case NI_System_Math_Max:
printf(" max");
break;
case NI_System_Math_Min:
printf(" min");
break;
case NI_System_Math_Pow:
printf(" pow");
break;
case NI_System_Math_Round:
printf(" round");
break;
case NI_System_Math_Sin:
printf(" sin");
break;
case NI_System_Math_Sinh:
printf(" sinh");
break;
case NI_System_Math_Sqrt:
printf(" sqrt");
break;
case NI_System_Math_Tan:
printf(" tan");
break;
case NI_System_Math_Tanh:
printf(" tanh");
break;
case NI_System_Math_Truncate:
printf(" truncate");
break;
case NI_System_Object_GetType:
printf(" objGetType");
break;
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
printf(" isKnownConst");
break;
default:
unreached();
}
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
if (tree->AsOp()->gtOp1 != nullptr)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
{
childMsg = "else";
}
else if (tree->gtOper == GT_QMARK)
{
childMsg = " if";
}
gtDispChild(tree->AsOp()->gtOp1, indentStack,
(tree->gtGetOp2IfPresent() == nullptr) ? IIArcBottom : IIArc, childMsg, topOnly);
}
if (tree->gtGetOp2IfPresent())
{
// Label the childMsgs of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
{
childMsg = "then";
}
gtDispChild(tree->AsOp()->gtOp2, indentStack, IIArcBottom, childMsg, topOnly);
}
}
return;
}
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
switch (tree->gtOper)
{
case GT_FIELD_LIST:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
char offset[32];
sprintf_s(offset, sizeof(offset), "ofs %u", use.GetOffset());
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, offset);
}
}
break;
case GT_PHI:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
char block[32];
sprintf_s(block, sizeof(block), "pred " FMT_BB, use.GetNode()->AsPhiArg()->gtPredBB->bbNum);
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, block);
}
}
break;
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
GenTree* lastChild = nullptr;
call->VisitOperands([&lastChild](GenTree* operand) -> GenTree::VisitResult {
lastChild = operand;
return GenTree::VisitResult::Continue;
});
if (call->gtCallType != CT_INDIRECT)
{
const char* methodName;
const char* className;
methodName = eeGetMethodName(call->gtCallMethHnd, &className);
printf(" %s.%s", className, methodName);
}
if ((call->gtFlags & GTF_CALL_UNMANAGED) && (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH))
{
printf(" (FramesRoot last use)");
}
if (((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (call->gtInlineCandidateInfo != nullptr) &&
(call->gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(call->gtInlineCandidateInfo->exactContextHnd));
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
char buf[64];
char* bufp;
bufp = &buf[0];
if ((call->gtCallThisArg != nullptr) && !call->gtCallThisArg->GetNode()->OperIs(GT_NOP, GT_ARGPLACE))
{
if (call->gtCallThisArg->GetNode()->OperIs(GT_ASG))
{
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
}
else
{
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
}
gtDispChild(call->gtCallThisArg->GetNode(), indentStack,
(call->gtCallThisArg->GetNode() == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (call->gtCallArgs)
{
gtDispArgList(call, lastChild, indentStack);
}
if (call->gtCallType == CT_INDIRECT)
{
gtDispChild(call->gtCallAddr, indentStack, (call->gtCallAddr == lastChild) ? IIArcBottom : IIArc,
"calli tgt", topOnly);
}
if (call->gtControlExpr != nullptr)
{
gtDispChild(call->gtControlExpr, indentStack,
(call->gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
}
int lateArgIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
IndentInfo arcType = (use.GetNext() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(call, use.GetNode(), lateArgIndex, bufp, sizeof(buf));
gtDispChild(use.GetNode(), indentStack, arcType, bufp, topOnly);
lateArgIndex++;
}
}
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD)
if (tree->OperIs(GT_SIMD))
{
printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->GetSIMDIntrinsicId()]);
}
#endif // defined(FEATURE_SIMD)
#if defined(FEATURE_HW_INTRINSICS)
if (tree->OperIs(GT_HWINTRINSIC))
{
printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
: varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId()));
}
#endif // defined(FEATURE_HW_INTRINSICS)
gtDispCommonEndLine(tree);
if (!topOnly)
{
size_t index = 0;
size_t count = tree->AsMultiOp()->GetOperandCount();
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
gtDispChild(operand, indentStack, ++index < count ? IIArc : IIArcBottom, nullptr, topOnly);
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrElem()->gtArrObj, indentStack, IIArc, nullptr, topOnly);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
IndentInfo arcType = ((dim + 1) == tree->AsArrElem()->gtArrRank) ? IIArcBottom : IIArc;
gtDispChild(tree->AsArrElem()->gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
}
}
break;
case GT_ARR_OFFSET:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrOffs()->gtOffset, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtIndex, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_CMPXCHG:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsCmpXchg()->gtOpLocation, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpValue, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_STORE_DYN_BLK:
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
if (tree->AsStoreDynBlk()->Data() != nullptr)
{
gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
}
gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
default:
printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
printf(""); // null string means flush
break;
}
}
//------------------------------------------------------------------------
// gtGetArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength)
{
if (call->gtCallLateArgs != nullptr)
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
if (arg->gtFlags & GTF_LATE_ARG)
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
else
{
#ifdef TARGET_ARM
if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
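// Two registers print as "first,last"; longer runs print as a "first-last" range.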
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
}
}
else
{
sprintf_s(bufp, bufLength, "arg%d%c", argNum, 0);
}
}
//------------------------------------------------------------------------
// gtGetLateArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
// lateArgIndex - The ordinal number of the arg in the lastArg list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetLateArgMsg(GenTreeCall* call, GenTree* argx, int lateArgIndex, char* bufp, unsigned bufLength)
{
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
regNumber argReg = curArgTabEntry->GetRegNum();
#if FEATURE_FIXED_OUT_ARGS
if (argReg == REG_STK)
{
sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->GetByteOffset(), 0);
}
else
#endif
{
if (curArgTabEntry->use == call->gtCallThisArg)
{
sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0);
}
#ifdef TARGET_ARM
else if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
unsigned argNum = curArgTabEntry->argNum;
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
else
{
#if FEATURE_MULTIREG_ARGS
if (curArgTabEntry->numRegs >= 2)
{
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg), separator,
compRegVarName(curArgTabEntry->GetRegNum(curArgTabEntry->numRegs - 1)), 0);
}
else
#endif
{
sprintf_s(bufp, bufLength, "arg%d in %s%c", curArgTabEntry->argNum, compRegVarName(argReg), 0);
}
}
}
}
//------------------------------------------------------------------------
// gtDispArgList: Dump the tree for a call arg list
//
// Arguments:
// call - the call to dump arguments for
// lastCallOperand - the call's last operand (to determine the arc types)
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
void Compiler::gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack)
{
unsigned argnum = 0;
if (call->gtCallThisArg != nullptr)
{
argnum++;
}
for (GenTreeCall::Use& use : call->Args())
{
GenTree* argNode = use.GetNode();
if (!argNode->IsNothingNode() && !argNode->IsArgPlaceHolderNode())
{
char buf[256];
gtGetArgMsg(call, argNode, argnum, buf, sizeof(buf));
gtDispChild(argNode, indentStack, (argNode == lastCallOperand) ? IIArcBottom : IIArc, buf, false);
}
argnum++;
}
}
//------------------------------------------------------------------------
// gtDispStmt: Print a statement to jitstdout.
//
// Arguments:
// stmt - the statement to be printed;
// msg - an additional message to print before the statement.
//
void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
{
if (opts.compDbgInfo)
{
if (msg != nullptr)
{
printf("%s ", msg);
}
printStmtID(stmt);
printf(" ( ");
const DebugInfo& di = stmt->GetDebugInfo();
// For statements in the root we display just the location without the
// inline context info.
if (di.GetInlineContext() == nullptr || di.GetInlineContext()->IsRoot())
{
di.GetLocation().Dump();
}
else
{
stmt->GetDebugInfo().Dump(false);
}
printf(" ... ");
IL_OFFSET lastILOffs = stmt->GetLastILOffset();
if (lastILOffs == BAD_IL_OFFSET)
{
printf("???");
}
else
{
printf("0x%03X", lastILOffs);
}
printf(" )");
DebugInfo par;
if (stmt->GetDebugInfo().GetParent(&par))
{
printf(" <- ");
par.Dump(true);
}
printf("\n");
}
gtDispTree(stmt->GetRootNode());
}
//------------------------------------------------------------------------
// gtDispBlockStmts: dumps all statements inside `block`.
//
// Arguments:
// block - the block to display statements for.
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
}
}
//------------------------------------------------------------------------
// Compiler::gtDispRange: dumps a range of LIR.
//
// Arguments:
// range - the range of LIR to display.
//
void Compiler::gtDispRange(LIR::ReadOnlyRange const& range)
{
for (GenTree* node : range)
{
gtDispLIRNode(node);
}
}
//------------------------------------------------------------------------
// Compiler::gtDispTreeRange: dumps the LIR range that contains all of the
// nodes in the dataflow tree rooted at a given
// node.
//
// Arguments:
// containingRange - the LIR range that contains the root node.
// tree - the root of the dataflow tree.
//
void Compiler::gtDispTreeRange(LIR::Range& containingRange, GenTree* tree)
{
bool unused;
gtDispRange(containingRange.GetTreeRange(tree, &unused));
}
//------------------------------------------------------------------------
// Compiler::gtDispLIRNode: dumps a single LIR node.
//
// Arguments:
// node - the LIR node to dump.
// prefixMsg - an optional prefix for each line of output.
//
void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr */)
{
auto displayOperand = [](GenTree* operand, const char* message, IndentInfo operandArc, IndentStack& indentStack,
size_t prefixIndent) {
assert(operand != nullptr);
assert(message != nullptr);
if (prefixIndent != 0)
{
printf("%*s", (int)prefixIndent, "");
}
// 50 spaces for alignment
printf("%-50s", "");
#if FEATURE_SET_FLAGS
// additional flag enlarges the flag field by one character
printf(" ");
#endif
indentStack.Push(operandArc);
indentStack.print();
indentStack.Pop();
operandArc = IIArc;
printf(" t%-5d %-6s %s\n", operand->gtTreeID, varTypeName(operand->TypeGet()), message);
};
IndentStack indentStack(this);
size_t prefixIndent = 0;
if (prefixMsg != nullptr)
{
prefixIndent = strlen(prefixMsg);
}
const int bufLength = 256;
char buf[bufLength];
const bool nodeIsCall = node->IsCall();
// Visit operands
IndentInfo operandArc = IIArcTop;
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
{
// Either of these situations may happen with calls.
continue;
}
if (nodeIsCall)
{
GenTreeCall* call = node->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
sprintf_s(buf, sizeof(buf), "this in %s", compRegVarName(REG_ARG_0));
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallAddr)
{
displayOperand(operand, "calli tgt", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtControlExpr)
{
displayOperand(operand, "control expr", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallCookie)
{
displayOperand(operand, "cookie", operandArc, indentStack, prefixIndent);
}
else
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByNode(call, operand);
assert(curArgTabEntry);
if (!curArgTabEntry->isLateArg())
{
gtGetArgMsg(call, operand, curArgTabEntry->argNum, buf, sizeof(buf));
}
else
{
gtGetLateArgMsg(call, operand, curArgTabEntry->GetLateArgInx(), buf, sizeof(buf));
}
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_STORE_DYN_BLK))
{
if (operand == node->AsBlk()->Addr())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else if (operand == node->AsBlk()->Data())
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
else
{
assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_ASG))
{
if (operand == node->gtGetOp1())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
}
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
}
operandArc = IIArc;
}
// Visit the operator
if (prefixMsg != nullptr)
{
printf("%s", prefixMsg);
}
const bool topOnly = true;
const bool isLIR = true;
gtDispTree(node, &indentStack, nullptr, topOnly, isLIR);
}
/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************
*
* Check if the given node can be folded,
* and call the methods to perform the folding
*/
GenTree* Compiler::gtFoldExpr(GenTree* tree)
{
unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
// If we're in the CSE phase, it's not safe to perform tree
// folding, since doing so could potentially change the set
// of considered CSE candidates.
if (optValnumCSE_phase)
{
return tree;
}
if (!(kind & GTK_SMPOP))
{
return tree;
}
GenTree* op1 = tree->AsOp()->gtOp1;
/* Filter out non-foldable trees that can have constant children */
assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
case GT_RETFILT:
case GT_RETURN:
case GT_IND:
return tree;
default:
break;
}
/* try to fold the current node */
if ((kind & GTK_UNOP) && op1)
{
if (op1->OperIsConst())
{
return gtFoldExprConst(tree);
}
}
else if ((kind & GTK_BINOP) && op1 && tree->AsOp()->gtOp2 &&
// Don't take out conditionals for debugging
(opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->AsOp()->gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
if (op1->OperIsConst() && op2->OperIsConst() && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
/* at least one is a constant - see if we have a
* special operator that can use only one constant
* to fold - e.g. booleans */
return gtFoldExprSpecial(tree);
}
else if (tree->OperIsCompare())
{
/* comparisons of two local variables can sometimes be folded */
return gtFoldExprCompare(tree);
}
}
/* Return the original node (folded/bashed or not) */
return tree;
}
//------------------------------------------------------------------------
// gtFoldExprCall: see if a call is foldable
//
// Arguments:
// call - call to examine
//
// Returns:
// The original call if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// Checks for calls to Type.op_Equality, Type.op_Inequality, and
// Enum.HasFlag, and if the call is to one of these,
// attempts to optimize.
GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
{
// Can only fold calls to special intrinsics.
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0)
{
return call;
}
// Defer folding if not optimizing.
if (opts.OptimizationDisabled())
{
return call;
}
// Check for a new-style jit intrinsic.
const NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = call->gtCallThisArg->GetNode();
GenTree* flagOp = call->gtCallArgs->GetNode();
GenTree* result = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (result != nullptr)
{
return result;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
noway_assert(call->TypeGet() == TYP_INT);
GenTree* op1 = call->gtCallArgs->GetNode();
GenTree* op2 = call->gtCallArgs->GetNext()->GetNode();
// If either operand is known to be a RuntimeType, this can be folded
GenTree* result = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (result != nullptr)
{
return result;
}
break;
}
default:
break;
}
return call;
}
//------------------------------------------------------------------------
// gtFoldTypeEqualityCall: see if a (potential) type equality call is foldable
//
// Arguments:
// isEq -- is it == or != operator
// op1 -- first argument to call
// op2 -- second argument to call
//
// Returns:
// nullptr if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// If either operand is known to be a RuntimeType, then the type
// equality methods will simply check object identity and so we can
// fold the call into a simple compare of the call's operands.
GenTree* Compiler::gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2)
{
if ((gtGetTypeProducerKind(op1) == TPK_Unknown) && (gtGetTypeProducerKind(op2) == TPK_Unknown))
{
return nullptr;
}
const genTreeOps simpleOp = isEq ? GT_EQ : GT_NE;
JITDUMP("\nFolding call to Type:op_%s to a simple compare via %s\n", isEq ? "Equality" : "Inequality",
GenTree::OpName(simpleOp));
GenTree* compare = gtNewOperNode(simpleOp, TYP_INT, op1, op2);
return compare;
}
/*****************************************************************************
*
* Some comparisons can be folded:
*
* locA == locA
* classVarA == classVarA
* locA + locB == locB + locA
*
*/
GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
assert(tree->OperIsCompare());
/* Filter out cases that cannot be folded here */
/* Do not fold floats or doubles (e.g. NaN != Nan) */
if (varTypeIsFloating(op1->TypeGet()))
{
return tree;
}
// Currently we can only fold when the two subtrees exactly match
// and everything is side effect free.
//
if (((tree->gtFlags & GTF_SIDE_EFFECT) != 0) || !GenTree::Compare(op1, op2, true))
{
// No folding.
//
return tree;
}
// GTF_ORDER_SIDEEFF here may indicate volatile subtrees.
// Or it may indicate a non-null assertion prop into an indir subtree.
//
// Check the operands.
//
if ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)
{
// If op1 is "volatle" and op2 is not, we can still fold.
//
const bool op1MayBeVolatile = (op1->gtFlags & GTF_ORDER_SIDEEFF) != 0;
const bool op2MayBeVolatile = (op2->gtFlags & GTF_ORDER_SIDEEFF) != 0;
if (!op1MayBeVolatile || op2MayBeVolatile)
{
// No folding.
//
return tree;
}
}
GenTree* cons;
switch (tree->gtOper)
{
case GT_EQ:
case GT_LE:
case GT_GE:
cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
break;
case GT_NE:
case GT_LT:
case GT_GT:
cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
break;
default:
assert(!"Unexpected relOp");
return tree;
}
/* The node has been folded into 'cons' */
JITDUMP("\nFolding comparison with identical operands:\n");
DISPTREE(tree);
if (fgGlobalMorph)
{
fgMorphTreeDone(cons);
}
else
{
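// Not in global morph: preserve the original node's links so 'cons' can stand in for the
// folded node in the linear order.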
cons->gtNext = tree->gtNext;
cons->gtPrev = tree->gtPrev;
}
JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
}
//------------------------------------------------------------------------
// gtCreateHandleCompare: generate a type handle comparison
//
// Arguments:
// oper -- comparison operation (equal/not equal)
// op1 -- first operand
// op2 -- second operand
// typeCheckInliningResult -- indicates how the comparison should happen
//
// Returns:
// Type comparison tree
//
GenTree* Compiler::gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult)
{
// If we can compare pointers directly, just emit the binary operation
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_PASS)
{
return gtNewOperNode(oper, TYP_INT, op1, op2);
}
assert(typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_USE_HELPER);
// Emit a call to a runtime helper
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1, op2);
GenTree* ret = gtNewHelperCallNode(CORINFO_HELP_ARE_TYPES_EQUIVALENT, TYP_INT, helperArgs);
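// The helper returns a non-zero value when the types are equivalent, so compare its result
// against zero to produce the requested relop.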
if (oper == GT_EQ)
{
ret = gtNewOperNode(GT_NE, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
else
{
assert(oper == GT_NE);
ret = gtNewOperNode(GT_EQ, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
return ret;
}
//------------------------------------------------------------------------
// gtFoldTypeCompare: see if a type comparison can be further simplified
//
// Arguments:
// tree -- tree possibly comparing types
//
// Returns:
// An alternative tree if folding happens.
// Original tree otherwise.
//
// Notes:
// Checks for
// typeof(...) == obj.GetType()
// typeof(...) == typeof(...)
// obj1.GetType() == obj2.GetType()
//
// And potentially optimizes away the need to obtain actual
// RuntimeType objects to do the comparison.
GenTree* Compiler::gtFoldTypeCompare(GenTree* tree)
{
// Only handle EQ and NE
// (maybe relop vs null someday)
const genTreeOps oper = tree->OperGet();
if ((oper != GT_EQ) && (oper != GT_NE))
{
return tree;
}
// Screen for the right kinds of operands
GenTree* const op1 = tree->AsOp()->gtOp1;
const TypeProducerKind op1Kind = gtGetTypeProducerKind(op1);
if (op1Kind == TPK_Unknown)
{
return tree;
}
GenTree* const op2 = tree->AsOp()->gtOp2;
const TypeProducerKind op2Kind = gtGetTypeProducerKind(op2);
if (op2Kind == TPK_Unknown)
{
return tree;
}
// If both types are created via handles, we can simply compare
// handles instead of the types that they'd create.
if ((op1Kind == TPK_Handle) && (op2Kind == TPK_Handle))
{
JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
GenTree* op1ClassFromHandle = tree->AsOp()->gtOp1->AsCall()->gtCallArgs->GetNode();
GenTree* op2ClassFromHandle = tree->AsOp()->gtOp2->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE cls1Hnd = NO_CLASS_HANDLE;
CORINFO_CLASS_HANDLE cls2Hnd = NO_CLASS_HANDLE;
// Try and find class handles from op1 and op2
cls1Hnd = gtGetHelperArgClassHandle(op1ClassFromHandle);
cls2Hnd = gtGetHelperArgClassHandle(op2ClassFromHandle);
// If we have both class handles, try and resolve the type equality test completely.
bool resolveFailed = false;
if ((cls1Hnd != NO_CLASS_HANDLE) && (cls2Hnd != NO_CLASS_HANDLE))
{
JITDUMP("Asking runtime to compare %p (%s) and %p (%s) for equality\n", dspPtr(cls1Hnd),
eeGetClassName(cls1Hnd), dspPtr(cls2Hnd), eeGetClassName(cls2Hnd));
TypeCompareState s = info.compCompHnd->compareTypesForEquality(cls1Hnd, cls2Hnd);
if (s != TypeCompareState::May)
{
// Type comparison result is known.
const bool typesAreEqual = (s == TypeCompareState::Must);
const bool operatorIsEQ = (oper == GT_EQ);
const int compareResult = operatorIsEQ ^ typesAreEqual ? 0 : 1;
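// (operatorIsEQ ^ typesAreEqual) is true exactly when the known type relationship contradicts
// the operator, in which case the compare folds to 0 (false); otherwise it folds to 1 (true).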
JITDUMP("Runtime reports comparison is known at jit time: %u\n", compareResult);
GenTree* result = gtNewIconNode(compareResult);
return result;
}
else
{
resolveFailed = true;
}
}
if (resolveFailed)
{
JITDUMP("Runtime reports comparison is NOT known at jit time\n");
}
else
{
JITDUMP("Could not find handle for %s%s\n", (cls1Hnd == NO_CLASS_HANDLE) ? " cls1" : "",
(cls2Hnd == NO_CLASS_HANDLE) ? " cls2" : "");
}
// We can't answer the equality comparison definitively at jit
// time, but can still simplify the comparison.
//
// Find out how we can compare the two handles.
// NOTE: We're potentially passing NO_CLASS_HANDLE, but the runtime knows what to do with it here.
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(cls1Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
// If the first type needs helper, check the other type: it might be okay with a simple compare.
if (inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER)
{
inliningKind = info.compCompHnd->canInlineTypeCheck(cls2Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
}
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, op1ClassFromHandle, op2ClassFromHandle, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
if ((op1Kind == TPK_GetType) && (op2Kind == TPK_GetType))
{
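// Both sides are obj.GetType() (or the equivalent intrinsic): compare the objects' method table
// pointers directly rather than materializing RuntimeType objects.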
GenTree* arg1;
if (op1->OperGet() == GT_INTRINSIC)
{
arg1 = op1->AsUnOp()->gtOp1;
}
else
{
arg1 = op1->AsCall()->gtCallThisArg->GetNode();
}
arg1 = gtNewMethodTableLookup(arg1);
GenTree* arg2;
if (op2->OperGet() == GT_INTRINSIC)
{
arg2 = op2->AsUnOp()->gtOp1;
}
else
{
arg2 = op2->AsCall()->gtCallThisArg->GetNode();
}
arg2 = gtNewMethodTableLookup(arg2);
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(nullptr, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, arg1, arg2, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
// If one operand creates a type from a handle and the other operand is fetching the type from an object,
// we can sometimes optimize the type compare into a simpler
// method table comparison.
//
// TODO: if other operand is null...
if (!(((op1Kind == TPK_GetType) && (op2Kind == TPK_Handle)) ||
((op1Kind == TPK_Handle) && (op2Kind == TPK_GetType))))
{
return tree;
}
GenTree* const opHandle = (op1Kind == TPK_Handle) ? op1 : op2;
GenTree* const opOther = (op1Kind == TPK_Handle) ? op2 : op1;
// Tunnel through the handle operand to get at the class handle involved.
GenTree* const opHandleArgument = opHandle->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE clsHnd = gtGetHelperArgClassHandle(opHandleArgument);
// If we couldn't find the class handle, give up.
if (clsHnd == NO_CLASS_HANDLE)
{
return tree;
}
// Ask the VM if this type can be equality tested by a simple method
// table comparison.
CorInfoInlineTypeCheck typeCheckInliningResult =
info.compCompHnd->canInlineTypeCheck(clsHnd, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_NONE)
{
return tree;
}
// We're good to go.
JITDUMP("Optimizing compare of obj.GetType()"
" and type-from-handle to compare method table pointer\n");
// opHandleArgument is the method table we're looking for.
GenTree* const knownMT = opHandleArgument;
// Fetch object method table from the object itself.
GenTree* objOp = nullptr;
// Note we may see intrinsified or regular calls to GetType
if (opOther->OperGet() == GT_INTRINSIC)
{
objOp = opOther->AsUnOp()->gtOp1;
}
else
{
objOp = opOther->AsCall()->gtCallThisArg->GetNode();
}
bool pIsExact = false;
bool pIsNonNull = false;
CORINFO_CLASS_HANDLE objCls = gtGetClassHandle(objOp, &pIsExact, &pIsNonNull);
// if both classes are "final" (e.g. System.String[]) we can replace the comparison
// with `true/false` + null check.
if ((objCls != NO_CLASS_HANDLE) && (pIsExact || impIsClassExact(objCls)))
{
TypeCompareState tcs = info.compCompHnd->compareTypesForEquality(objCls, clsHnd);
if (tcs != TypeCompareState::May)
{
const bool operatorIsEQ = oper == GT_EQ;
const bool typesAreEqual = tcs == TypeCompareState::Must;
GenTree* compareResult = gtNewIconNode((operatorIsEQ ^ typesAreEqual) ? 0 : 1);
if (!pIsNonNull)
{
// we still have to emit a null-check
// obj.GetType == typeof() -> (nullcheck) true/false
GenTree* nullcheck = gtNewNullCheck(objOp, compCurBB);
return gtNewOperNode(GT_COMMA, tree->TypeGet(), nullcheck, compareResult);
}
else if (objOp->gtFlags & GTF_ALL_EFFECT)
{
return gtNewOperNode(GT_COMMA, tree->TypeGet(), objOp, compareResult);
}
else
{
return compareResult;
}
}
}
// Fetch the method table from the object
GenTree* const objMT = gtNewMethodTableLookup(objOp);
// Compare the two method tables
GenTree* const compare = gtCreateHandleCompare(oper, objMT, knownMT, typeCheckInliningResult);
// Drop any now irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// And we're done
return compare;
}
//------------------------------------------------------------------------
// gtGetHelperArgClassHandle: find the compile time class handle from
// a helper call argument tree
//
// Arguments:
// tree - tree that passes the handle to the helper
//
// Returns:
// The compile time class handle if known.
//
CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE result = NO_CLASS_HANDLE;
// Walk through any wrapping nop.
if ((tree->gtOper == GT_NOP) && (tree->gtType == TYP_I_IMPL))
{
tree = tree->AsOp()->gtOp1;
}
// The handle could be a literal constant
if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
}
// Or the result of a runtime lookup
else if (tree->OperGet() == GT_RUNTIMELOOKUP)
{
result = tree->AsRuntimeLookup()->GetClassHandle();
}
// Or something reached indirectly
else if (tree->gtOper == GT_IND)
{
// The handle indirs we are looking for will be marked as non-faulting.
// Certain others (eg from refanytype) may not be.
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)handleTreeInternal->AsIntCon()->gtCompileTimeHandle;
}
}
}
return result;
}
//------------------------------------------------------------------------
// gtFoldExprSpecial -- optimize binary ops with one constant operand
//
// Arguments:
// tree - tree to optimize
//
// Return value:
// Tree (possibly modified at root or below), or a new tree
// Any new tree is fully morphed, if necessary.
//
GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
genTreeOps oper = tree->OperGet();
GenTree* op;
GenTree* cons;
ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
if (oper == GT_CAST)
{
return tree;
}
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
{
return tree;
}
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
/* Get the constant value */
val = cons->AsIntConCommon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
// Helper function that creates a new IntCon node and morphs it, if required
auto NewMorphedIntConNode = [&](int value) -> GenTreeIntCon* {
GenTreeIntCon* icon = gtNewIconNode(value);
if (fgGlobalMorph)
{
fgMorphTreeDone(icon);
}
return icon;
};
// Here `op` is the non-constant operand, `cons` is the constant operand
// and `val` is the constant value.
switch (oper)
{
case GT_LE:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 <= x) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_GE:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x >= 0) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_LT:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x < 0) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
break;
case GT_GT:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 > x) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
FALLTHROUGH;
case GT_EQ:
case GT_NE:
            // Optimize boxed value classes; the 'box == null' compare is always false. This IL is
// generated when a generic value is tested against null:
// <T> ... foo(T x) { ... if ((object)x == null) ...
if ((val == 0) && op->IsBoxedValue())
{
JITDUMP("\nAttempting to optimize BOX(valueType) %s null [%06u]\n", GenTree::OpName(oper),
dspTreeID(tree));
// We don't expect GT_GT with signed compares, and we
// can't predict the result if we do see it, since the
// boxed object addr could have its high bit set.
if ((oper == GT_GT) && !tree->IsUnsigned())
{
JITDUMP(" bailing; unexpected signed compare via GT_GT\n");
}
else
{
// The tree under the box must be side effect free
// since we will drop it if we optimize.
assert(!gtTreeHasSideEffects(op->AsBox()->BoxOp(), GTF_SIDE_EFFECT));
// See if we can optimize away the box and related statements.
GenTree* boxSourceTree = gtTryRemoveBoxUpstreamEffects(op);
bool didOptimize = (boxSourceTree != nullptr);
// If optimization succeeded, remove the box.
if (didOptimize)
{
// Set up the result of the compare.
int compareResult = 0;
if (oper == GT_GT)
{
// GT_GT(null, box) == false
// GT_GT(box, null) == true
compareResult = (op1 == op);
}
else if (oper == GT_EQ)
{
// GT_EQ(box, null) == false
// GT_EQ(null, box) == false
compareResult = 0;
}
else
{
assert(oper == GT_NE);
// GT_NE(box, null) == true
// GT_NE(null, box) == true
compareResult = 1;
}
JITDUMP("\nSuccess: replacing BOX(valueType) %s null with %d\n", GenTree::OpName(oper),
compareResult);
return NewMorphedIntConNode(compareResult);
}
}
}
else
{
return gtFoldBoxNullable(tree);
}
break;
case GT_ADD:
if (val == 0)
{
goto DONE_FOLD;
}
break;
case GT_MUL:
if (val == 1)
{
goto DONE_FOLD;
}
else if (val == 0)
{
/* Multiply by zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_DIV:
case GT_UDIV:
if ((op2 == cons) && (val == 1) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_SUB:
if ((op2 == cons) && (val == 0) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_AND:
if (val == 0)
{
/* AND with zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
else
{
/* The GTF_BOOLEAN flag is set for nodes that are part
* of a boolean expression, thus all their children
* are known to evaluate to only 0 or 1 */
if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1
* AND with 1 stays the same */
assert(val == 1);
goto DONE_FOLD;
}
}
break;
case GT_OR:
if (val == 0)
{
goto DONE_FOLD;
}
else if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1 - OR with 1 is 1 */
assert(val == 1);
/* OR with one - return the 'one' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
if (val == 0)
{
if (op2 == cons)
{
goto DONE_FOLD;
}
else if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->AsOp()->gtOp1 && op2->AsOp()->gtOp2);
assert(val == 0 || val == 1);
if (val)
{
op = op2->AsColon()->ThenNode();
}
else
{
op = op2->AsColon()->ElseNode();
}
            // Clear colon flags only if the qmark itself is not conditionally executed
if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
goto DONE_FOLD;
default:
break;
}
/* The node is not foldable */
return tree;
DONE_FOLD:
    /* The node has been folded into 'op' */
    // If there was an assignment update, we just morphed it into
    // a use; update the flags appropriately
if (op->gtOper == GT_LCL_VAR)
{
assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF);
}
JITDUMP("\nFolding binary operator with a constant operand:\n");
DISPTREE(tree);
JITDUMP("Transformed into:\n");
DISPTREE(op);
return op;
}
//------------------------------------------------------------------------
// gtFoldBoxNullable -- optimize a boxed nullable feeding a compare to zero
//
// Arguments:
// tree - binop tree to potentially optimize, must be
// GT_GT, GT_EQ, or GT_NE
//
// Return value:
// Tree (possibly modified below the root).
//
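// Notes:
//    For example, a boxed Nullable<T> compared against null:
//
//       GT_NE(CALL help CORINFO_HELP_BOX_NULLABLE(..., &x), CNS_INT 0)
//
//    is rewritten below so that the helper call operand becomes a read of
//    the nullable's 'hasValue' field, i.e. GT_NE(x.hasValue, CNS_INT 0).
//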
GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
{
assert(tree->OperKind() & GTK_BINOP);
assert(tree->OperIs(GT_GT, GT_EQ, GT_NE));
genTreeOps const oper = tree->OperGet();
if ((oper == GT_GT) && !tree->IsUnsigned())
{
return tree;
}
GenTree* const op1 = tree->AsOp()->gtOp1;
GenTree* const op2 = tree->AsOp()->gtOp2;
GenTree* op;
GenTree* cons;
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
ssize_t const val = cons->AsIntConCommon()->IconValue();
if (val != 0)
{
return tree;
}
if (!op->IsCall())
{
return tree;
}
GenTreeCall* const call = op->AsCall();
if (!call->IsHelperCall(this, CORINFO_HELP_BOX_NULLABLE))
{
return tree;
}
JITDUMP("\nAttempting to optimize BOX_NULLABLE(&x) %s null [%06u]\n", GenTree::OpName(oper), dspTreeID(tree));
// Get the address of the struct being boxed
GenTree* const arg = call->gtCallArgs->GetNext()->GetNode();
if (arg->OperIs(GT_ADDR) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
CORINFO_CLASS_HANDLE nullableHnd = gtGetStructHandle(arg->AsOp()->gtOp1);
CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(nullableHnd, 0);
// Replace the box with an access of the nullable 'hasValue' field.
JITDUMP("\nSuccess: replacing BOX_NULLABLE(&x) [%06u] with x.hasValue\n", dspTreeID(op));
GenTree* newOp = gtNewFieldRef(TYP_BOOL, fieldHnd, arg, 0);
if (op == op1)
{
tree->AsOp()->gtOp1 = newOp;
}
else
{
tree->AsOp()->gtOp2 = newOp;
}
cons->gtType = TYP_INT;
}
return tree;
}
//------------------------------------------------------------------------
// gtTryRemoveBoxUpstreamEffects: given an unused value type box,
// try and remove the upstream allocation and unnecessary parts of
// the copy.
//
// Arguments:
// op - the box node to optimize
// options - controls whether and how trees are modified
// (see notes)
//
// Return Value:
// A tree representing the original value to box, if removal
// is successful/possible (but see note). nullptr if removal fails.
//
// Notes:
// Value typed box gets special treatment because it has associated
// side effects that can be removed if the box result is not used.
//
// By default (options == BR_REMOVE_AND_NARROW) this method will
//    try and remove unnecessary trees and will try and reduce remaining
// operations to the minimal set, possibly narrowing the width of
// loads from the box source if it is a struct.
//
// To perform a trial removal, pass BR_DONT_REMOVE. This can be
// useful to determine if this optimization should only be
// performed if some other conditions hold true.
//
// To remove but not alter the access to the box source, pass
// BR_REMOVE_BUT_NOT_NARROW.
//
// To remove and return the tree for the type handle used for
// the boxed newobj, pass BR_REMOVE_BUT_NOT_NARROW_WANT_TYPE_HANDLE.
// This can be useful when the only part of the box that is "live"
// is its type.
//
//    If removal fails, it is possible that a subsequent pass may be
// able to optimize. Blocking side effects may now be minimized
// (null or bounds checks might have been removed) or might be
// better known (inline return placeholder updated with the actual
// return expression). So the box is perhaps best left as is to
// help trigger this re-examination.
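//
//    For an inlined box the upstream IR looks roughly like:
//
//       asgStmt:  ASG(boxTempLcl, ALLOCOBJ / newobj helper call)
//       copyStmt: ASG(OBJ(ADD(boxTempLcl, TARGET_POINTER_SIZE)), copySrc)
//
//    On successful removal the newobj assignment is bashed to a NOP and the
//    copy is either bashed to a NOP or reduced to just the side effects of
//    'copySrc'.
//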
GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions options)
{
assert(op->IsBoxedValue());
// grab related parts for the optimization
GenTreeBox* box = op->AsBox();
Statement* asgStmt = box->gtAsgStmtWhenInlinedBoxValue;
Statement* copyStmt = box->gtCopyStmtWhenInlinedBoxValue;
JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)"
" [%06u] (assign/newobj " FMT_STMT " copy " FMT_STMT "\n",
(options == BR_DONT_REMOVE) ? "checking if it is possible" : "attempting",
(options == BR_MAKE_LOCAL_COPY) ? "make local unboxed version" : "remove side effects", dspTreeID(op),
asgStmt->GetID(), copyStmt->GetID());
// If we don't recognize the form of the assign, bail.
GenTree* asg = asgStmt->GetRootNode();
if (asg->gtOper != GT_ASG)
{
JITDUMP(" bailing; unexpected assignment op %s\n", GenTree::OpName(asg->gtOper));
return nullptr;
}
// If we're eventually going to return the type handle, remember it now.
GenTree* boxTypeHandle = nullptr;
if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE))
{
GenTree* asgSrc = asg->AsOp()->gtOp2;
genTreeOps asgSrcOper = asgSrc->OperGet();
// Allocation may be via AllocObj or via helper call, depending
// on when this is invoked and whether the jit is using AllocObj
// for R2R allocations.
if (asgSrcOper == GT_ALLOCOBJ)
{
GenTreeAllocObj* allocObj = asgSrc->AsAllocObj();
boxTypeHandle = allocObj->AsOp()->gtOp1;
}
else if (asgSrcOper == GT_CALL)
{
GenTreeCall* newobjCall = asgSrc->AsCall();
GenTreeCall::Use* newobjArgs = newobjCall->gtCallArgs;
// In R2R expansions the handle may not be an explicit operand to the helper,
// so we can't remove the box.
if (newobjArgs == nullptr)
{
assert(newobjCall->IsHelperCall(this, CORINFO_HELP_READYTORUN_NEW));
JITDUMP(" bailing; newobj via R2R helper\n");
return nullptr;
}
boxTypeHandle = newobjArgs->GetNode();
}
else
{
unreached();
}
assert(boxTypeHandle != nullptr);
}
// If we don't recognize the form of the copy, bail.
GenTree* copy = copyStmt->GetRootNode();
if (copy->gtOper != GT_ASG)
{
// GT_RET_EXPR is a tolerable temporary failure.
// The jit will revisit this optimization after
// inlining is done.
if (copy->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy %s\n", GenTree::OpName(copy->gtOper));
}
else
{
// Anything else is a missed case we should
// figure out how to handle. One known case
// is GT_COMMAs enclosing the GT_ASG we are
// looking for.
JITDUMP(" bailing; unexpected copy op %s\n", GenTree::OpName(copy->gtOper));
}
return nullptr;
}
// Handle case where we are optimizing the box into a local copy
if (options == BR_MAKE_LOCAL_COPY)
{
// Drill into the box to get at the box temp local and the box type
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
assert(lvaTable[boxTempLcl].lvType == TYP_REF);
CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd;
assert(boxClass != nullptr);
// Verify that the copyDst has the expected shape
// (blk|obj|ind (add (boxTempLcl, ptr-size)))
//
// The shape here is constrained to the patterns we produce
// over in impImportAndPushBox for the inlined box case.
GenTree* copyDst = copy->AsOp()->gtOp1;
if (!copyDst->OperIs(GT_BLK, GT_IND, GT_OBJ))
{
JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper));
return nullptr;
}
GenTree* copyDstAddr = copyDst->AsOp()->gtOp1;
if (copyDstAddr->OperGet() != GT_ADD)
{
JITDUMP("Unexpected copy dest address tree\n");
return nullptr;
}
GenTree* copyDstAddrOp1 = copyDstAddr->AsOp()->gtOp1;
if ((copyDstAddrOp1->OperGet() != GT_LCL_VAR) || (copyDstAddrOp1->AsLclVarCommon()->GetLclNum() != boxTempLcl))
{
JITDUMP("Unexpected copy dest address 1st addend\n");
return nullptr;
}
GenTree* copyDstAddrOp2 = copyDstAddr->AsOp()->gtOp2;
if (!copyDstAddrOp2->IsIntegralConst(TARGET_POINTER_SIZE))
{
JITDUMP("Unexpected copy dest address 2nd addend\n");
return nullptr;
}
// Screening checks have all passed. Do the transformation.
//
// Retype the box temp to be a struct
JITDUMP("Retyping box temp V%02u to struct %s\n", boxTempLcl, eeGetClassName(boxClass));
lvaTable[boxTempLcl].lvType = TYP_UNDEF;
const bool isUnsafeValueClass = false;
lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass);
var_types boxTempType = lvaTable[boxTempLcl].lvType;
        // Remove the newobj and assignment to box temp
JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Update the copy from the value to be boxed to the box temp
GenTree* newDst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
copyDst->AsOp()->gtOp1 = newDst;
// Return the address of the now-struct typed box temp
GenTree* retValue = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
return retValue;
}
// If the copy is a struct copy, make sure we know how to isolate
// any source side effects.
GenTree* copySrc = copy->AsOp()->gtOp2;
// If the copy source is from a pending inline, wait for it to resolve.
if (copySrc->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy source %s\n", GenTree::OpName(copySrc->gtOper));
return nullptr;
}
bool hasSrcSideEffect = false;
bool isStructCopy = false;
if (gtTreeHasSideEffects(copySrc, GTF_SIDE_EFFECT))
{
hasSrcSideEffect = true;
if (varTypeIsStruct(copySrc->gtType))
{
isStructCopy = true;
if ((copySrc->gtOper != GT_OBJ) && (copySrc->gtOper != GT_IND) && (copySrc->gtOper != GT_FIELD))
{
// We don't know how to handle other cases, yet.
JITDUMP(" bailing; unexpected copy source struct op with side effect %s\n",
GenTree::OpName(copySrc->gtOper));
return nullptr;
}
}
}
// If this was a trial removal, we're done.
if (options == BR_DONT_REMOVE)
{
return copySrc;
}
if (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
// Otherwise, proceed with the optimization.
//
// Change the assignment expression to a NOP.
JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Change the copy expression so it preserves key
// source side effects.
JITDUMP("\nBashing COPY [%06u]", dspTreeID(copy));
if (!hasSrcSideEffect)
{
// If there were no copy source side effects just bash
// the copy to a NOP.
copy->gtBashToNOP();
JITDUMP(" to NOP; no source side effects.\n");
}
else if (!isStructCopy)
{
// For scalar types, go ahead and produce the
// value as the copy is fairly cheap and likely
// the optimizer can trim things down to just the
// minimal side effect parts.
copyStmt->SetRootNode(copySrc);
JITDUMP(" to scalar read via [%06u]\n", dspTreeID(copySrc));
}
else
{
// For struct types read the first byte of the
// source struct; there's no need to read the
// entire thing, and no place to put it.
assert(copySrc->OperIs(GT_OBJ, GT_IND, GT_FIELD));
copyStmt->SetRootNode(copySrc);
if (options == BR_REMOVE_AND_NARROW || options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
JITDUMP(" to read first byte of struct via modified [%06u]\n", dspTreeID(copySrc));
gtChangeOperToNullCheck(copySrc, compCurBB);
}
else
{
JITDUMP(" to read entire struct via modified [%06u]\n", dspTreeID(copySrc));
}
}
if (fgStmtListThreaded)
{
fgSetStmtSeq(asgStmt);
fgSetStmtSeq(copyStmt);
}
// Box effects were successfully optimized.
if (options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
else
{
return copySrc;
}
}
//------------------------------------------------------------------------
// gtOptimizeEnumHasFlag: given the operands for a call to Enum.HasFlag,
// try and optimize the call to a simple and/compare tree.
//
// Arguments:
// thisOp - first argument to the call
// flagOp - second argument to the call
//
// Return Value:
//    A new cmp/and tree if successful. nullptr on failure.
//
// Notes:
// If successful, may allocate new temps and modify connected
// statements.
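//
//    For example, "e.HasFlag(f)", where both operands are boxes of the same
//    enum type and both boxes can be removed, is rewritten to:
//
//       GT_EQ(GT_AND(eVal, fVal), fVal)
//
//    where eVal and fVal are the pre-boxed values (spilled to temps first
//    unless they are integral constants).
//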
GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp)
{
JITDUMP("Considering optimizing call to Enum.HasFlag....\n");
// Operands must be boxes
if (!thisOp->IsBoxedValue() || !flagOp->IsBoxedValue())
{
JITDUMP("bailing, need both inputs to be BOXes\n");
return nullptr;
}
// Operands must have same type
bool isExactThis = false;
bool isNonNullThis = false;
CORINFO_CLASS_HANDLE thisHnd = gtGetClassHandle(thisOp, &isExactThis, &isNonNullThis);
if (thisHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'this' operand\n");
return nullptr;
}
// A boxed thisOp should have exact type and non-null instance
assert(isExactThis);
assert(isNonNullThis);
bool isExactFlag = false;
bool isNonNullFlag = false;
CORINFO_CLASS_HANDLE flagHnd = gtGetClassHandle(flagOp, &isExactFlag, &isNonNullFlag);
if (flagHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'flag' operand\n");
return nullptr;
}
// A boxed flagOp should have exact type and non-null instance
assert(isExactFlag);
assert(isNonNullFlag);
if (flagHnd != thisHnd)
{
JITDUMP("bailing, operand types differ\n");
return nullptr;
}
// If we have a shared type instance we can't safely check type
// equality, so bail.
DWORD classAttribs = info.compCompHnd->getClassAttribs(thisHnd);
if (classAttribs & CORINFO_FLG_SHAREDINST)
{
JITDUMP("bailing, have shared instance type\n");
return nullptr;
}
// Simulate removing the box for thisOP. We need to know that it can
// be safely removed before we can optimize.
GenTree* thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_DONT_REMOVE);
if (thisVal == nullptr)
{
// Note we may fail here if the this operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'this' operand\n");
return nullptr;
}
// Do likewise with flagOp.
GenTree* flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_DONT_REMOVE);
if (flagVal == nullptr)
{
// Note we may fail here if the flag operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'flag' operand\n");
return nullptr;
}
// Only proceed when both box sources have the same actual type.
// (this rules out long/int mismatches)
if (genActualType(thisVal->TypeGet()) != genActualType(flagVal->TypeGet()))
{
JITDUMP("bailing, pre-boxed values have different types\n");
return nullptr;
}
// Yes, both boxes can be cleaned up. Optimize.
JITDUMP("Optimizing call to Enum.HasFlag\n");
// Undo the boxing of the Ops and prepare to operate directly
// on the pre-boxed values.
thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_REMOVE_BUT_NOT_NARROW);
flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_REMOVE_BUT_NOT_NARROW);
// Our trial removals above should guarantee successful removals here.
assert(thisVal != nullptr);
assert(flagVal != nullptr);
assert(genActualType(thisVal->TypeGet()) == genActualType(flagVal->TypeGet()));
// Type to use for optimized check
var_types type = genActualType(thisVal->TypeGet());
// The thisVal and flagVal trees come from earlier statements.
//
// Unless they are invariant values, we need to evaluate them both
// to temps at those points to safely transmit the values here.
//
// Also we need to use the flag twice, so we need two trees for it.
GenTree* thisValOpt = nullptr;
GenTree* flagValOpt = nullptr;
GenTree* flagValOptCopy = nullptr;
if (thisVal->IsIntegralConst())
{
thisValOpt = gtClone(thisVal);
assert(thisValOpt != nullptr);
}
else
{
const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp"));
GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal);
Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
thisAsgStmt->SetRootNode(thisAsg);
thisValOpt = gtNewLclvNode(thisTmp, type);
}
if (flagVal->IsIntegralConst())
{
flagValOpt = gtClone(flagVal);
assert(flagValOpt != nullptr);
flagValOptCopy = gtClone(flagVal);
assert(flagValOptCopy != nullptr);
}
else
{
const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp"));
GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal);
Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
flagAsgStmt->SetRootNode(flagAsg);
flagValOpt = gtNewLclvNode(flagTmp, type);
flagValOptCopy = gtNewLclvNode(flagTmp, type);
}
// Turn the call into (thisValTmp & flagTmp) == flagTmp.
GenTree* andTree = gtNewOperNode(GT_AND, type, thisValOpt, flagValOpt);
GenTree* cmpTree = gtNewOperNode(GT_EQ, TYP_INT, andTree, flagValOptCopy);
JITDUMP("Optimized call to Enum.HasFlag\n");
return cmpTree;
}
/*****************************************************************************
*
* Fold the given constant tree.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
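// For example, GT_ADD(CNS_INT 2, CNS_INT 3) is bashed in place to CNS_INT 5,
// and a GT_CAST of a constant is folded to a constant of the cast-to type.
// During global morph, checked operations that would overflow are instead
// morphed into a comma of an overflow helper call and a zero constant (see
// INTEGRAL_OVF below).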
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
SSIZE_T i1, i2, itemp;
INT64 lval1, lval2, ltemp;
float f1, f2;
double d1, d2;
var_types switchType;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
return tree;
}
if (tree->OperIs(GT_NOP, GT_ALLOCOBJ, GT_RUNTIMELOOKUP))
{
return tree;
}
// This condition exists to preserve previous behavior.
// TODO-CQ: enable folding for bounds checks nodes.
if (tree->OperIs(GT_BOUNDS_CHECK))
{
return tree;
}
#ifdef FEATURE_SIMD
if (tree->OperIs(GT_SIMD))
{
return tree;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (tree->OperIs(GT_HWINTRINSIC))
{
return tree;
}
#endif
if (tree->OperIsUnary())
{
assert(op1->OperIsConst());
switch (op1->TypeGet())
{
case TYP_INT:
// Fold constant INT unary operator.
if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = (INT32)op1->AsIntCon()->IconValue();
// If we fold a unary oper, then the folded constant
// is considered a ConstantIndexField if op1 was one.
if ((op1->AsIntCon()->gtFieldSeq != nullptr) && op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
switch (tree->OperGet())
{
case GT_NOT:
i1 = ~i1;
break;
case GT_NEG:
i1 = -i1;
break;
case GT_BSWAP:
i1 = ((i1 >> 24) & 0xFF) | ((i1 >> 8) & 0xFF00) | ((i1 << 8) & 0xFF0000) |
((i1 << 24) & 0xFF000000);
break;
case GT_BSWAP16:
i1 = ((i1 >> 8) & 0xFF) | ((i1 << 8) & 0xFF00);
break;
case GT_CAST:
// assert (genActualType(tree->CastToType()) == tree->TypeGet());
if (tree->gtOverflow() &&
CheckedOps::CastFromIntOverflows((INT32)i1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(i1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(i1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(i1));
goto CNS_INT;
case TYP_BOOL:
case TYP_UBYTE:
i1 = INT32(UINT8(i1));
goto CNS_INT;
case TYP_UINT:
case TYP_INT:
goto CNS_INT;
case TYP_ULONG:
if (tree->IsUnsigned())
{
lval1 = UINT64(UINT32(i1));
}
else
{
lval1 = UINT64(INT32(i1));
}
goto CNS_LONG;
case TYP_LONG:
if (tree->IsUnsigned())
{
lval1 = INT64(UINT32(i1));
}
else
{
lval1 = INT64(INT32(i1));
}
goto CNS_LONG;
case TYP_FLOAT:
if (tree->IsUnsigned())
{
f1 = forceCastToFloat(UINT32(i1));
}
else
{
f1 = forceCastToFloat(INT32(i1));
}
d1 = f1;
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (tree->IsUnsigned())
{
d1 = (double)UINT32(i1);
}
else
{
d1 = (double)INT32(i1);
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from int");
return tree;
}
default:
return tree;
}
goto CNS_INT;
case TYP_LONG:
// Fold constant LONG unary operator.
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
switch (tree->OperGet())
{
case GT_NOT:
lval1 = ~lval1;
break;
case GT_NEG:
lval1 = -lval1;
break;
case GT_BSWAP:
lval1 = ((lval1 >> 56) & 0xFF) | ((lval1 >> 40) & 0xFF00) | ((lval1 >> 24) & 0xFF0000) |
((lval1 >> 8) & 0xFF000000) | ((lval1 << 8) & 0xFF00000000) |
((lval1 << 24) & 0xFF0000000000) | ((lval1 << 40) & 0xFF000000000000) |
((lval1 << 56) & 0xFF00000000000000);
break;
case GT_CAST:
assert(tree->TypeIs(genActualType(tree->CastToType())));
if (tree->gtOverflow() &&
CheckedOps::CastFromLongOverflows(lval1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(lval1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(lval1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(lval1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(lval1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(lval1);
goto CNS_INT;
case TYP_UINT:
i1 = UINT32(lval1);
goto CNS_INT;
case TYP_ULONG:
case TYP_LONG:
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->IsUnsigned() && (lval1 < 0))
{
d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
}
else
{
d1 = (double)lval1;
}
if (tree->CastToType() == TYP_FLOAT)
{
f1 = forceCastToFloat(d1); // truncate precision
d1 = f1;
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from long");
return tree;
}
default:
return tree;
}
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
assert(op1->OperIs(GT_CNS_DBL));
// Fold constant DOUBLE unary operator.
d1 = op1->AsDblCon()->gtDconVal;
switch (tree->OperGet())
{
case GT_NEG:
d1 = -d1;
break;
case GT_CAST:
f1 = forceCastToFloat(d1);
if ((op1->TypeIs(TYP_DOUBLE) && CheckedOps::CastFromDoubleOverflows(d1, tree->CastToType())) ||
(op1->TypeIs(TYP_FLOAT) && CheckedOps::CastFromFloatOverflows(f1, tree->CastToType())))
{
// The conversion overflows. The ECMA spec says, in III 3.27, that
// "...if overflow occurs converting a floating point type to an integer, ...,
// the value returned is unspecified." However, it would at least be
// desirable to have the same value returned for casting an overflowing
// constant to an int as would be obtained by passing that constant as
// a parameter and then casting that parameter to an int type.
                            // Don't fold overflowing conversions, as the value returned by
// JIT's codegen doesn't always match with the C compiler's cast result.
// We want the behavior to be the same with or without folding.
return tree;
}
assert(tree->TypeIs(genActualType(tree->CastToType())));
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(d1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(d1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(d1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(d1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(d1);
goto CNS_INT;
case TYP_UINT:
i1 = forceCastToUInt32(d1);
goto CNS_INT;
case TYP_LONG:
lval1 = INT64(d1);
goto CNS_LONG;
case TYP_ULONG:
lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
goto CNS_LONG;
case TYP_FLOAT:
d1 = forceCastToFloat(d1);
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (op1->TypeIs(TYP_FLOAT))
{
d1 = forceCastToFloat(d1); // Truncate precision.
}
goto CNS_DOUBLE; // Redundant cast.
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from double/float");
break;
}
return tree;
default:
return tree;
}
goto CNS_DOUBLE;
default:
// Not a foldable typ - e.g. RET const.
return tree;
}
}
// We have a binary operator.
assert(tree->OperIsBinary());
assert(op2 != nullptr);
assert(op1->OperIsConst());
assert(op2->OperIsConst());
if (tree->OperIs(GT_COMMA))
{
return op2;
}
switchType = op1->TypeGet();
// Normally we will just switch on op1 types, but for the case where
// only op2 is a GC type and op1 is not a GC type, we use the op2 type.
// This makes us handle this as a case of folding for GC type.
if (varTypeIsGC(op2->gtType) && !varTypeIsGC(op1->gtType))
{
switchType = op2->TypeGet();
}
switch (switchType)
{
// Fold constant REF of BYREF binary operator.
// These can only be comparisons or null pointers.
case TYP_REF:
// String nodes are an RVA at this point.
if (op1->OperIs(GT_CNS_STR) || op2->OperIs(GT_CNS_STR))
{
// Fold "ldstr" ==/!= null.
if (op2->IsIntegralConst(0))
{
if (tree->OperIs(GT_EQ))
{
i1 = 0;
goto FOLD_COND;
}
if (tree->OperIs(GT_NE) || (tree->OperIs(GT_GT) && tree->IsUnsigned()))
{
i1 = 1;
goto FOLD_COND;
}
}
return tree;
}
FALLTHROUGH;
case TYP_BYREF:
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (i1 == i2);
goto FOLD_COND;
case GT_NE:
i1 = (i1 != i2);
goto FOLD_COND;
case GT_ADD:
noway_assert(!tree->TypeIs(TYP_REF));
// We only fold a GT_ADD that involves a null reference.
if ((op1->TypeIs(TYP_REF) && (i1 == 0)) || (op2->TypeIs(TYP_REF) && (i2 == 0)))
{
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Fold into GT_IND of null byref.
tree->BashToConst(0, TYP_BYREF);
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("\nFolded to null byref:\n");
DISPTREE(tree);
goto DONE;
}
break;
default:
break;
}
return tree;
// Fold constant INT binary operator.
case TYP_INT:
assert(tree->TypeIs(TYP_INT) || varTypeIsGC(tree) || tree->OperIs(GT_MKREFANY));
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (INT32(i1) == INT32(i2));
break;
case GT_NE:
i1 = (INT32(i1) != INT32(i2));
break;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) < UINT32(i2));
}
else
{
i1 = (INT32(i1) < INT32(i2));
}
break;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) <= UINT32(i2));
}
else
{
i1 = (INT32(i1) <= INT32(i2));
}
break;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) >= UINT32(i2));
}
else
{
i1 = (INT32(i1) >= INT32(i2));
}
break;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) > UINT32(i2));
}
else
{
i1 = (INT32(i1) > INT32(i2));
}
break;
case GT_ADD:
itemp = i1 + i2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
break;
case GT_SUB:
itemp = i1 - i2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
break;
case GT_MUL:
itemp = i1 * i2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
// For the very particular case of the "constant array index" pseudo-field, we
// assume that multiplication is by the field width, and preserves that field.
// This could obviously be made more robust by a more complicated set of annotations...
if ((op1->AsIntCon()->gtFieldSeq != nullptr) &&
op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
else if ((op2->AsIntCon()->gtFieldSeq != nullptr) &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op1->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op2->AsIntCon()->gtFieldSeq;
}
i1 = itemp;
break;
case GT_OR:
i1 |= i2;
break;
case GT_XOR:
i1 ^= i2;
break;
case GT_AND:
i1 &= i2;
break;
case GT_LSH:
i1 <<= (i2 & 0x1f);
break;
case GT_RSH:
i1 >>= (i2 & 0x1f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
i1 = UINT32(i1) >> (i2 & 0x1f);
break;
case GT_ROL:
i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
break;
case GT_ROR:
i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
break;
// DIV and MOD can throw an exception - if the division is by 0
// or there is overflow - when dividing MIN by -1.
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (INT32(i2) == 0)
{
// Division by zero.
// We have to evaluate this expression and throw an exception.
return tree;
}
else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
{
// Overflow Division.
// We have to evaluate this expression and throw an exception.
return tree;
}
if (tree->OperIs(GT_DIV))
{
i1 = INT32(i1) / INT32(i2);
}
else if (tree->OperIs(GT_MOD))
{
i1 = INT32(i1) % INT32(i2);
}
else if (tree->OperIs(GT_UDIV))
{
i1 = UINT32(i1) / UINT32(i2);
}
else
{
assert(tree->OperIs(GT_UMOD));
i1 = UINT32(i1) % UINT32(i2);
}
break;
default:
return tree;
}
// We get here after folding to a GT_CNS_INT type.
// change the node to the new type / value and make sure the node sizes are OK.
CNS_INT:
FOLD_COND:
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Also all conditional folding jumps here since the node hanging from
// GT_JTRUE has to be a GT_CNS_INT - value 0 or 1.
// Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits
// need to be discarded. Since constant values are stored as ssize_t and the node
// has TYP_INT the result needs to be sign extended rather than zero extended.
tree->BashToConst(static_cast<int>(i1));
tree->AsIntCon()->gtFieldSeq = fieldSeq;
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to int constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant LONG binary operator.
case TYP_LONG:
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
// op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
// For the shift operators we can have a op2 that is a TYP_INT.
// Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
lval2 = op2->AsIntConCommon()->IntegralValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (lval1 == lval2);
goto FOLD_COND;
case GT_NE:
i1 = (lval1 != lval2);
goto FOLD_COND;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) < UINT64(lval2));
}
else
{
i1 = (lval1 < lval2);
}
goto FOLD_COND;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) <= UINT64(lval2));
}
else
{
i1 = (lval1 <= lval2);
}
goto FOLD_COND;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) >= UINT64(lval2));
}
else
{
i1 = (lval1 >= lval2);
}
goto FOLD_COND;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) > UINT64(lval2));
}
else
{
i1 = (lval1 > lval2);
}
goto FOLD_COND;
case GT_ADD:
ltemp = lval1 + lval2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
#ifdef TARGET_64BIT
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
#endif
break;
case GT_SUB:
ltemp = lval1 - lval2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_MUL:
ltemp = lval1 * lval2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_OR:
lval1 |= lval2;
break;
case GT_XOR:
lval1 ^= lval2;
break;
case GT_AND:
lval1 &= lval2;
break;
case GT_LSH:
lval1 <<= (lval2 & 0x3f);
break;
case GT_RSH:
lval1 >>= (lval2 & 0x3f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
lval1 = UINT64(lval1) >> (lval2 & 0x3f);
break;
case GT_ROL:
lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
break;
case GT_ROR:
lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
break;
// Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
// that behavior here.
case GT_DIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 /= lval2;
break;
case GT_MOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 %= lval2;
break;
case GT_UDIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) / UINT64(lval2);
break;
case GT_UMOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) % UINT64(lval2);
break;
default:
return tree;
}
CNS_LONG:
#if !defined(TARGET_64BIT)
if (fieldSeq != FieldSeqStore::NotAField())
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
}
#endif // !defined(TARGET_64BIT)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant FLOAT or DOUBLE binary operator
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->gtOverflowEx())
{
return tree;
}
assert(op1->OperIs(GT_CNS_DBL));
d1 = op1->AsDblCon()->gtDconVal;
assert(varTypeIsFloating(op2->TypeGet()));
assert(op2->OperIs(GT_CNS_DBL));
d2 = op2->AsDblCon()->gtDconVal;
// Special case - check if we have NaN operands.
            // For comparisons, if not an unordered operation, always return 0.
// For unordered operations (i.e. the GTF_RELOP_NAN_UN flag is set)
// the result is always true - return 1.
if (_isnan(d1) || _isnan(d2))
{
JITDUMP("Double operator(s) is NaN\n");
if (tree->OperIsCompare())
{
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
// Unordered comparison with NaN always succeeds.
i1 = 1;
goto FOLD_COND;
}
else
{
// Normal comparison with NaN always fails.
i1 = 0;
goto FOLD_COND;
}
}
}
switch (tree->OperGet())
{
case GT_EQ:
i1 = (d1 == d2);
goto FOLD_COND;
case GT_NE:
i1 = (d1 != d2);
goto FOLD_COND;
case GT_LT:
i1 = (d1 < d2);
goto FOLD_COND;
case GT_LE:
i1 = (d1 <= d2);
goto FOLD_COND;
case GT_GE:
i1 = (d1 >= d2);
goto FOLD_COND;
case GT_GT:
i1 = (d1 > d2);
goto FOLD_COND;
                // Floating point arithmetic should be done in the declared
                // precision while doing constant folding. So although TYP_FLOAT
                // constants are stored as double constants, when performing float
                // arithmetic the double constants must first be converted to float.
                // Here is an example case where performing arithmetic in double
                // precision would lead to incorrect results.
//
// Example:
// float a = float.MaxValue;
// float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
// precision.
                // float c = b/b;   This will produce NaN in single precision and 1 in double precision.
case GT_ADD:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 + f2);
}
else
{
d1 += d2;
}
break;
case GT_SUB:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 - f2);
}
else
{
d1 -= d2;
}
break;
case GT_MUL:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 * f2);
}
else
{
d1 *= d2;
}
break;
case GT_DIV:
// We do not fold division by zero, even for floating point.
// This is because the result will be platform-dependent for an expression like 0d / 0d.
if (d2 == 0)
{
return tree;
}
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 / f2);
}
else
{
d1 /= d2;
}
break;
default:
return tree;
}
CNS_DOUBLE:
JITDUMP("\nFolding fp operator with constant nodes into a fp constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(d1, tree->TypeGet());
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to fp constant:\n");
DISPTREE(tree);
goto DONE;
default:
// Not a foldable type.
return tree;
}
DONE:
// Make sure no side effect flags are set on this constant node.
tree->gtFlags &= ~GTF_ALL_EFFECT;
return tree;
INTEGRAL_OVF:
// This operation is going to cause an overflow exception. Morph into
// an overflow helper. Put a dummy constant value for code generation.
//
// We could remove all subsequent trees in the current basic block,
// unless this node is a child of GT_COLON
//
// NOTE: Since the folded value is not constant we should not change the
// "tree" node - otherwise we confuse the logic that checks if the folding
// was successful - instead use one of the operands, e.g. op1.
// Don't fold overflow operations if not global morph phase.
// The reason for this is that this optimization is replacing a gentree node
// with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
// involving overflow arithmetic. During assertion prop, it is possible
// that the 'arg' could be constant folded and the result could lead to an
    // overflow. In such a case 'arg' will get replaced with a GT_COMMA node
// but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
// update args table. For this reason this optimization is enabled only
// for global morphing phase.
//
// TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
if (!fgGlobalMorph)
{
assert(tree->gtOverflow());
return tree;
}
var_types type = genActualType(tree->TypeGet());
op1 = type == TYP_LONG ? gtNewLconNode(0) : gtNewIconNode(0);
if (vnStore != nullptr)
{
op1->gtVNPair.SetBoth(vnStore->VNZeroForType(type));
}
JITDUMP("\nFolding binary operator with constant nodes into a comma throw:\n");
DISPTREE(tree);
    // We will change the operation to a GT_COMMA and attach the exception helper as AsOp()->gtOp1.
// The constant expression zero becomes op2.
assert(tree->gtOverflow());
assert(tree->OperIs(GT_ADD, GT_SUB, GT_CAST, GT_MUL));
assert(op1 != nullptr);
op2 = op1;
op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, gtNewCallArgs(gtNewIconNode(compCurBB->bbTryIndex)));
// op1 is a call to the JIT helper that throws an Overflow exception.
// Attach the ExcSet for VNF_OverflowExc(Void) to this call.
if (vnStore != nullptr)
{
op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc,
vnStore->VNPForVoid())));
}
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), op1, op2);
return tree;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// gtNewTempAssign: Create an assignment of the given value to a temp.
//
// Arguments:
// tmp - local number for a compiler temp
// val - value to assign to the temp
// pAfterStmt - statement to insert any additional statements after
// ilOffset - il offset for new statements
// block - block to insert any additional statements in
//
// Return Value:
// Normally a new assignment node.
// However may return a nop node if val is simply a reference to the temp.
//
// Notes:
// Self-assignments may be represented via NOPs.
//
// May update the type of the temp, if it was previously unknown.
//
// May set compFloatingPointUsed.
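//
//    For example, gtNewTempAssign(tmp, gtNewIconNode(1)) yields
//    ASG(LCL_VAR<int> tmp, CNS_INT 1), while assigning a temp to itself
//    yields a GT_NOP.
//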
GenTree* Compiler::gtNewTempAssign(
unsigned tmp, GenTree* val, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block)
{
// Self-assignment is a nop.
if (val->OperGet() == GT_LCL_VAR && val->AsLclVarCommon()->GetLclNum() == tmp)
{
return gtNewNothingNode();
}
LclVarDsc* varDsc = lvaGetDesc(tmp);
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
{
impBashVarAddrsToI(val);
}
var_types valTyp = val->TypeGet();
if (val->OperGet() == GT_LCL_VAR && lvaTable[val->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->AsLclVar()->GetLclNum());
val->gtType = valTyp;
}
var_types dstTyp = varDsc->TypeGet();
/* If the variable's lvType is not yet set then set it here */
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
#if FEATURE_SIMD
if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
#endif
}
#ifdef DEBUG
// Make sure the actual types match.
if (genActualType(valTyp) != genActualType(dstTyp))
{
// Plus some other exceptions that are apparently legal:
// 1) TYP_REF or BYREF = TYP_I_IMPL
bool ok = false;
if (varTypeIsGC(dstTyp) && (valTyp == TYP_I_IMPL))
{
ok = true;
}
// 2) TYP_DOUBLE = TYP_FLOAT or TYP_FLOAT = TYP_DOUBLE
else if (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))
{
ok = true;
}
// 3) TYP_BYREF = TYP_REF when object stack allocation is enabled
else if (JitConfig.JitObjectStackAllocation() && (dstTyp == TYP_BYREF) && (valTyp == TYP_REF))
{
ok = true;
}
else if (!varTypeIsGC(dstTyp) && (genTypeSize(valTyp) == genTypeSize(dstTyp)))
{
// We can have assignments that require a change of register file, e.g. for arguments
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
{
// It could come from `ASG(struct, 0)` that was propagated to `RETURN struct(0)`,
// and now it is merging to a struct again.
assert(tmp == genReturnLocal);
ok = true;
}
else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
{
assert(val->IsCall());
ok = true;
}
if (!ok)
{
gtDispTree(val);
assert(!"Incompatible types for gtNewTempAssign");
}
}
#endif
// Added this noway_assert for runtime\issue 44895, to protect against silent bad codegen
//
if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF))
{
noway_assert(!"Incompatible types for gtNewTempAssign");
}
// Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
if (varTypeUsesFloatReg(dstTyp) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
/* Create the assignment node */
GenTree* asg;
GenTree* dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are 2 special cases:
// 1. we have lost classHandle from a FIELD node because the parent struct has overlapping fields,
        //    the field was transformed into an IND or a GT_LCL_FLD;
        // 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`; a `CNS_INT` doesn't have a `structHnd`;
        //    in these cases, we can use the type of the merge return for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(tmp == genReturnLocal);
valStructHnd = lvaGetStruct(genReturnLocal);
assert(valStructHnd != NO_CLASS_HANDLE);
}
if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
{
asg = gtNewAssignNode(dest, val);
}
else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
        // The struct value may be a child of a GT_COMMA due to explicit null checks of indirs/fields.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valStructHnd != NO_CLASS_HANDLE)
{
lvaSetStruct(tmp, valStructHnd, false);
}
else
{
assert(valx->gtOper != GT_OBJ);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// We may have a scalar type variable assigned a struct value, e.g. a 'genReturnLocal'
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
(typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
if (compRationalIRForm)
{
Rationalizer::RewriteAssignmentIntoStoreLcl(asg->AsOp());
}
return asg;
}
/*****************************************************************************
*
* Create a helper call to access a COM field (iff 'assg' is non-zero this is
* an assignment and 'assg' is the new value).
*/
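//
// The helper call built below takes its arguments roughly in the order
// (objPtr, fieldHnd[, structType][, newValue]). For address-returning helpers
// the result is then indirected and, for a set, additionally wrapped in an
// assignment of 'assg'.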
GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
GenTreeCall::Use* args = nullptr;
var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_SET)
{
assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
{
assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE);
}
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
{
assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT);
}
args = gtNewCallArgs(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
// The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
helperType = TYP_STRUCT;
}
}
}
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(pFieldInfo->structType != nullptr);
args = gtPrependNewCallArg(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTree* fieldHnd = impTokenToHandle(pResolvedToken);
if (fieldHnd == nullptr)
{ // compDonotInline()
return nullptr;
}
args = gtPrependNewCallArg(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
if (objPtr != nullptr)
{
args = gtPrependNewCallArg(objPtr, args);
}
GenTreeCall* call = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), args);
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
GenTree* result = call;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_GET)
{
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT)
{
if (!varTypeIsStruct(lclTyp))
{
// get the result as primitive type
result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true);
result = gtNewOperNode(GT_IND, lclTyp, result);
}
}
else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT))
{
// The helper does not extend the small return types.
result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp);
}
}
}
else
{
// OK, now do the indirection
if (access & CORINFO_ACCESS_GET)
{
if (varTypeIsStruct(lclTyp))
{
result = gtNewObjNode(structType, result);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
}
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF);
}
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
{
result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
result = gtNewAssignNode(result, assg);
}
}
}
return result;
}
/*****************************************************************************
*
* Return true if the given node (excluding children trees) contains side effects.
* Note that it does not recurse, and children need to be handled separately.
* It may return false even if the node has GTF_SIDE_EFFECT (because of its children).
*
* Similar to OperMayThrow() (but handles GT_CALLs specially), but considers
* assignments too.
*/
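// For example, a GT_ASG node reports a side effect when GTF_ASG is included
// in 'flags', a node that may throw reports one when GTF_EXCEPT is included,
// and a helper call with no side effects of its own and side-effect-free
// arguments reports none.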
bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
{
if (flags & GTF_ASG)
{
// TODO-Bug: This only checks for GT_ASG/GT_STORE_DYN_BLK but according to OperRequiresAsgFlag
// there are many more opers that are considered to have an assignment side effect: atomic ops
// (GT_CMPXCHG & co.), GT_MEMORYBARRIER (not classified as an atomic op) and HW intrinsic
// memory stores. Atomic ops have special handling in gtExtractSideEffList but the others
        // will simply be dropped if they are ever subject to an "extract side effects" operation.
// It is possible that the reason no bugs have yet been observed in this area is that the
// other nodes are likely to always be tree roots.
if (tree->OperIs(GT_ASG, GT_STORE_DYN_BLK))
{
return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* const call = tree->AsCall();
const bool ignoreExceptions = (flags & GTF_EXCEPT) == 0;
const bool ignoreCctors = (flags & GTF_IS_IN_CSE) != 0; // We can CSE helpers that run cctors.
if (!call->HasSideEffects(this, ignoreExceptions, ignoreCctors))
{
// If this call is otherwise side effect free, check its arguments.
for (GenTreeCall::Use& use : call->Args())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeCall::Use& use : call->LateArgs())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// Otherwise:
return false;
}
// Otherwise the GT_CALL is considered to have side-effects.
return true;
}
}
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow(this))
{
return true;
}
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
{
return true;
}
return false;
}
/*****************************************************************************
* Returns true if the expr tree has any side effects.
*/
bool Compiler::gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
GenTreeFlags sideEffectFlags = tree->gtFlags & flags;
// Does this tree have any Side-effect flags set that we care about?
if (sideEffectFlags == 0)
{
// no it doesn't..
return false;
}
if (sideEffectFlags == GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
// Generally all trees that contain GT_CALL nodes are considered to have side-effects.
//
if (tree->AsCall()->gtCallType == CT_HELPER)
{
// If this node is a helper call we may not care about the side-effects.
// Note that gtNodeHasSideEffects checks the side effects of the helper itself
// as well as the side effects of its arguments.
return gtNodeHasSideEffects(tree, flags);
}
}
else if (tree->OperGet() == GT_INTRINSIC)
{
if (gtNodeHasSideEffects(tree, flags))
{
return true;
}
if (gtNodeHasSideEffects(tree->AsOp()->gtOp1, flags))
{
return true;
}
if ((tree->AsOp()->gtOp2 != nullptr) && gtNodeHasSideEffects(tree->AsOp()->gtOp2, flags))
{
return true;
}
return false;
}
}
return true;
}
GenTree* Compiler::gtBuildCommaList(GenTree* list, GenTree* expr)
{
// 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
{
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
// Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
DBEXEC(fgGlobalMorph, result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one (unless we are remorphing,
// in which case a prior transform involving either node may have discarded or otherwise invalidated the value
// numbers).
assert((list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined()) || !fgGlobalMorph);
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (list->gtVNPair.BothDefined() && expr->gtVNPair.BothDefined())
{
// The result of a GT_COMMA node is op2, the normal value number is op2vnp
// But we also need to include the union of side effects from op1 and op2.
// we compute this value into exceptions_vnp.
ValueNumPair op1vnp;
ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
}
else
{
// The 'expr' will start the list of expressions
return expr;
}
}
//------------------------------------------------------------------------
// gtExtractSideEffList: Extracts side effects from the given expression.
//
// Arguments:
// expr - the expression tree to extract side effects from
// pList - pointer to a (possibly null) GT_COMMA list that
// will contain the extracted side effects
// flags - side effect flags to be considered
// ignoreRoot - ignore side effects on the expression root node
//
// Notes:
// Side effects are prepended to the GT_COMMA list such that op1 of
// each comma node holds the side effect tree and op2 points to the
// next comma node. The original side effect execution order is preserved.
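// For example, if the walk below finds side effects s1 and s2 (in execution order),
// the resulting list is COMMA(s1, COMMA(s2, <original *pList>)), or simply
// COMMA(s1, s2) when *pList starts out null.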
//
void Compiler::gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags /* = GTF_SIDE_EFFECT*/,
bool ignoreRoot /* = false */)
{
class SideEffectExtractor final : public GenTreeVisitor<SideEffectExtractor>
{
public:
const GenTreeFlags m_flags;
ArrayStack<GenTree*> m_sideEffects;
enum
{
DoPreOrder = true,
UseExecutionOrder = true
};
SideEffectExtractor(Compiler* compiler, GenTreeFlags flags)
: GenTreeVisitor(compiler), m_flags(flags), m_sideEffects(compiler->getAllocator(CMK_SideEffects))
{
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* node = *use;
bool treeHasSideEffects = m_compiler->gtTreeHasSideEffects(node, m_flags);
if (treeHasSideEffects)
{
if (m_compiler->gtNodeHasSideEffects(node, m_flags))
{
PushSideEffects(node);
if (node->OperIsBlk() && !node->OperIsStoreBlk())
{
JITDUMP("Replace an unused OBJ/BLK node [%06d] with a NULLCHECK\n", dspTreeID(node));
m_compiler->gtChangeOperToNullCheck(node, m_compiler->compCurBB);
}
return Compiler::WALK_SKIP_SUBTREES;
}
// TODO-Cleanup: These have GTF_ASG set but for some reason gtNodeHasSideEffects ignores
// them. See the related gtNodeHasSideEffects comment as well.
// Also, these nodes must always be preserved, no matter what side effect flags are passed
// in. But then it should never be the case that gtExtractSideEffList gets called without
// specifying GTF_ASG, so there doesn't seem to be any reason to be inconsistent with
// gtNodeHasSideEffects and make this check unconditional.
if (node->OperIsAtomicOp())
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
if ((m_flags & GTF_EXCEPT) != 0)
{
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT have to be kept together.
if (node->OperIs(GT_ADDR) && node->gtGetOp1()->OperIsIndir() &&
(node->gtGetOp1()->TypeGet() == TYP_STRUCT))
{
JITDUMP("Keep the GT_ADDR and GT_IND together:\n");
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
}
// Generally all GT_CALL nodes are considered to have side-effects.
// So if we get here it must be a helper call that we decided does not
// have side effects that we need to keep.
assert(!node->OperIs(GT_CALL) || (node->AsCall()->gtCallType == CT_HELPER));
}
if ((m_flags & GTF_IS_IN_CSE) != 0)
{
// If we're doing CSE then we also need to unmark CSE nodes. This will fail for CSE defs;
// those need to be extracted as if they're side effects.
if (!UnmarkCSE(node))
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
// The existence of CSE defs and uses is not propagated up the tree like side
// effects are. We need to continue visiting the tree as if it has side effects.
treeHasSideEffects = true;
}
return treeHasSideEffects ? Compiler::WALK_CONTINUE : Compiler::WALK_SKIP_SUBTREES;
}
private:
bool UnmarkCSE(GenTree* node)
{
assert(m_compiler->optValnumCSE_phase);
if (m_compiler->optUnmarkCSE(node))
{
// The call to optUnmarkCSE(node) should have cleared any CSE info.
assert(!IS_CSE_INDEX(node->gtCSEnum));
return true;
}
else
{
assert(IS_CSE_DEF(node->gtCSEnum));
#ifdef DEBUG
if (m_compiler->verbose)
{
printf("Preserving the CSE def #%02d at ", GET_CSE_INDEX(node->gtCSEnum));
m_compiler->printTreeID(node);
}
#endif
return false;
}
}
void PushSideEffects(GenTree* node)
{
// The extracted side effect will no longer be an argument, so unmark it.
// This is safe to do because the side effects will be visited in pre-order,
// aborting as soon as any tree is extracted. Thus if an argument for a call
// is being extracted, it is guaranteed that the call itself will not be.
node->gtFlags &= ~GTF_LATE_ARG;
m_sideEffects.Push(node);
}
};
SideEffectExtractor extractor(this, flags);
if (ignoreRoot)
{
for (GenTree* op : expr->Operands())
{
extractor.WalkTree(&op, nullptr);
}
}
else
{
extractor.WalkTree(&expr, nullptr);
}
GenTree* list = *pList;
// The extractor returns side effects in execution order but gtBuildCommaList prepends
// to the comma-based side effect list so we have to build the list in reverse order.
// This is also why the list cannot be built while traversing the tree.
// The number of side effects is usually small (<= 4), less than the ArrayStack's
// built-in size, so memory allocation is avoided.
while (!extractor.m_sideEffects.Empty())
{
list = gtBuildCommaList(list, extractor.m_sideEffects.Pop());
}
*pList = list;
}
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
#ifdef DEBUG
void dispNodeList(GenTree* list, bool verbose)
{
GenTree* last = nullptr;
GenTree* next;
if (!list)
{
return;
}
for (;;)
{
next = list->gtNext;
if (verbose)
{
printf("%08X -> %08X -> %08X\n", last, list, next);
}
assert(!last || last->gtNext == list);
assert(next == nullptr || next->gtPrev == list);
if (!next)
{
break;
}
last = list;
list = next;
}
printf(""); // null string means flush
}
#endif
/*****************************************************************************
* Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTree** pTree, fgWalkData* data)
{
assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
* Callback to clear the conditionally executed flags of nodes that will no
* longer be conditionally executed. Note that when we find another colon we
* must stop, as the nodes below this one WILL be conditionally executed. This
* callback is called when folding a qmark condition (i.e. the condition is
* constant).
*/
/* static */
Compiler::fgWalkResult Compiler::gtClearColonCond(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
assert(data->pCallbackData == nullptr);
if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
}
tree->gtFlags &= ~GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Callback used by the tree walker to implement fgFindLink()
*/
static Compiler::fgWalkResult gtFindLinkCB(GenTree** pTree, Compiler::fgWalkData* cbData)
{
Compiler::FindLinkData* data = (Compiler::FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
data->parent = cbData->parent;
return Compiler::WALK_ABORT;
}
return Compiler::WALK_CONTINUE;
}
Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node)
{
FindLinkData data = {node, nullptr, nullptr};
fgWalkResult result = fgWalkTreePre(stmt->GetRootNodePointer(), gtFindLinkCB, &data);
if (result == WALK_ABORT)
{
assert(data.nodeToFind == *data.result);
return data;
}
else
{
return {node, nullptr, nullptr};
}
}
/*****************************************************************************
*
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
static Compiler::fgWalkResult gtFindCatchArg(GenTree** pTree, Compiler::fgWalkData* /* data */)
{
return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
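// gtHasCatchArg: Returns true if the tree contains a GT_CATCH_ARG node. The
// (relatively expensive) tree walk is only performed when GTF_ORDER_SIDEEFF is
// set on the root, which is used here as a cheap pre-filter.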
bool Compiler::gtHasCatchArg(GenTree* tree)
{
if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
// Arguments:
// parentStack: a context (stack of parent nodes)
//
// Return Value:
// returns true if any of the parent nodes are a GT_CALL
//
// Assumptions:
// We have a stack of parent nodes. This generally requires that
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
for (int i = 0; i < parentStack->Height(); i++)
{
GenTree* node = parentStack->Top(i);
if (node->OperGet() == GT_CALL)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// gtGetTypeProducerKind: determine if a tree produces a runtime type, and
// if so, how.
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// TypeProducerKind for the tree.
//
// Notes:
// Checks to see if this tree returns a RuntimeType value, and if so,
// how that value is determined.
//
// Currently handles these cases
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) A null reference
// 4) Tree is otherwise known to have type RuntimeType
//
// The null reference case is surprisingly common because operator
// overloading turns the otherwise innocuous
//
// Type t = ....;
// if (t == null)
//
// into a method call.
Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
if (tree->gtOper == GT_CALL)
{
if (tree->AsCall()->gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree->AsCall()))
{
return TPK_Handle;
}
}
else if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(tree->AsCall()->gtCallMethHnd) == NI_System_Object_GetType)
{
return TPK_GetType;
}
}
}
else if ((tree->gtOper == GT_INTRINSIC) && (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType))
{
return TPK_GetType;
}
else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
{
return TPK_Null;
}
else
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull);
if (clsHnd != NO_CLASS_HANDLE && clsHnd == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
{
return TPK_Other;
}
}
return TPK_Unknown;
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHelper -- see if a helper call is constructing
// a RuntimeType from a handle
//
// Arguments:
// call - helper call to examine
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call)
{
return call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHandleHelper -- see if a helper call is constructing
// a RuntimeTypeHandle from a handle
//
// Arguments:
// call - helper call to examine
// pHelper - optional pointer to a variable that receives the type of the helper
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper)
{
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
}
else if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL;
}
if (pHelper != nullptr)
{
*pHelper = helper;
}
return helper != CORINFO_HELP_UNDEF;
}
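//------------------------------------------------------------------------
// gtIsActiveCSE_Candidate: Returns true if we are in the CSE phase and the
// tree has a CSE index assigned, i.e. it is an active CSE candidate.
//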
bool Compiler::gtIsActiveCSE_Candidate(GenTree* tree)
{
return (optValnumCSE_phase && IS_CSE_INDEX(tree->gtCSEnum));
}
/*****************************************************************************/
struct ComplexityStruct
{
unsigned m_numNodes;
unsigned m_nodeLimit;
ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
{
}
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTree** pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
{
return Compiler::WALK_ABORT;
}
else
{
return Compiler::WALK_CONTINUE;
}
}
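//------------------------------------------------------------------------
// gtComplexityExceeds: Returns true if the tree rooted at *tree contains more
// than 'limit' nodes; the walk aborts as soon as the limit is exceeded.
//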
bool Compiler::gtComplexityExceeds(GenTree** tree, unsigned limit)
{
ComplexityStruct complexity(limit);
if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
{
return true;
}
else
{
return false;
}
}
bool GenTree::IsPhiNode()
{
return (OperGet() == GT_PHI_ARG) || (OperGet() == GT_PHI) || IsPhiDefn();
}
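// Returns true if this node defines a PHI, i.e. it is either ASG(LCL_VAR, PHI(...))
// or STORE_LCL_VAR(PHI(...)).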
bool GenTree::IsPhiDefn()
{
bool res = ((OperGet() == GT_ASG) && (AsOp()->gtOp2 != nullptr) && (AsOp()->gtOp2->OperGet() == GT_PHI)) ||
((OperGet() == GT_STORE_LCL_VAR) && (AsOp()->gtOp1 != nullptr) && (AsOp()->gtOp1->OperGet() == GT_PHI));
assert(!res || OperGet() == GT_STORE_LCL_VAR || AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
//------------------------------------------------------------------------
// IsPartialLclFld: Check for a GT_LCL_FLD whose type is a different size than the lclVar.
//
// Arguments:
// comp - the Compiler object.
//
// Return Value:
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
(comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType)));
}
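//------------------------------------------------------------------------
// DefinesLocal: Determine whether this node defines (stores to) a local.
//
// Handles GT_ASG stores to a local (directly, or through an IND/block node whose
// address is based on a local), block store nodes appearing as the root, and
// calls that write to a local return buffer.
//
// Arguments:
// comp - the Compiler object.
// pLclVarTree - An "out" argument set to the defined local node, if one is found.
// pIsEntire - An optional "out" argument set to whether the entire local is defined.
//
// Return Value:
// Returns "true" iff a defined local was found.
//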
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
GenTreeBlk* blkNode = nullptr;
if (OperIs(GT_ASG))
{
if (AsOp()->gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = AsOp()->gtOp1->AsLclVarCommon();
*pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
{
*pIsEntire = false;
}
else
{
*pIsEntire = true;
}
}
return true;
}
else if (AsOp()->gtOp1->OperGet() == GT_IND)
{
GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1;
return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire);
}
else if (AsOp()->gtOp1->OperIsBlk())
{
blkNode = AsOp()->gtOp1->AsBlk();
}
}
else if (OperIs(GT_CALL))
{
GenTree* retBufArg = AsCall()->GetLclRetBufArgNode();
if (retBufArg == nullptr)
{
return false;
}
unsigned size = comp->typGetObjLayout(AsCall()->gtRetClsHnd)->GetSize();
return retBufArg->DefinesLocalAddr(comp, size, pLclVarTree, pIsEntire);
}
else if (OperIsBlk())
{
blkNode = this->AsBlk();
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_STORE_DYN_BLK))
{
GenTree* blockWidth = blkNode->AsStoreDynBlk()->gtDynamicSize;
if (blockWidth->IsCnsIntOrI())
{
assert(blockWidth->AsIntConCommon()->FitsInI32());
width = static_cast<unsigned>(blockWidth->AsIntConCommon()->IconValue());
if (width == 0)
{
return false;
}
}
}
return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
// Otherwise...
return false;
}
// Returns true if this GenTree defines a result which is based on the address of a local.
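// Recognized shapes include: ADDR of a local (or of an IND of a local address),
// LCL_VAR_ADDR, ADD(<local address>, CNS_INT) with the constant in either operand
// position, and LEA(<local address>, ...) where only the base address is considered.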
bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
if (OperGet() == GT_ADDR || OperGet() == GT_LCL_VAR_ADDR)
{
GenTree* addrArg = this;
if (OperGet() == GT_ADDR)
{
addrArg = AsOp()->gtOp1;
}
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
*pLclVarTree = addrArgLcl;
if (pIsEntire != nullptr)
{
unsigned lclOffset = addrArgLcl->GetLclOffs();
if (lclOffset != 0)
{
// We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false
*pIsEntire = false;
}
else
{
unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
// It's normalize on store, so use the full storage width -- writing to low bytes won't
// necessarily yield a normalized value.
varWidth = genTypeStSz(var_types(comp->lvaTable[lclNum].lvType)) * sizeof(int);
}
*pIsEntire = (varWidth == width);
}
}
return true;
}
else if (addrArg->OperGet() == GT_IND)
{
// A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND
return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
else if (OperGet() == GT_LEA)
{
// This method gets invoked during liveness computation and therefore it is critical
// that we don't miss 'use' of any local. The below logic is making the assumption
// that in case of LEA(base, index, offset) - only base can be a GT_LCL_VAR_ADDR
// and index is not.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
GenTree* index = AsOp()->gtOp2;
if (index != nullptr)
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
#endif // DEBUG
// base
GenTree* base = AsOp()->gtOp1;
if (base != nullptr)
{
// Lea could have an Indir as its base.
if (base->OperGet() == GT_IND)
{
base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true);
}
return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsLocalExpr: Determine if this is a LclVarCommon node and return some
// additional info about it in the two out parameters.
//
// Arguments:
// comp - The Compiler instance
// pLclVarTree - An "out" argument that returns the local tree as a
// LclVarCommon, if it is indeed local.
// pFldSeq - An "out" argument that returns the value numbering field
// sequence for the node, if any.
//
// Return Value:
// Returns true, and sets the out arguments accordingly, if this is
// a LclVarCommon node.
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
// If this tree evaluates some sum of a local address and some constants,
// return the node for the local being addressed
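// Recognized shapes: ADDR(<local>), a local address node (e.g. LCL_VAR_ADDR), and
// ADD(CNS_INT, <one of the above>) with the constant in either operand position.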
GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
{
if (OperGet() == GT_ADDR)
{
return AsOp()->gtOp1->IsLocal() ? AsOp()->gtOp1->AsLclVarCommon() : nullptr;
}
else if (OperIsLocalAddr())
{
return this->AsLclVarCommon();
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp2->IsLocalAddrExpr();
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp1->IsLocalAddrExpr();
}
}
// Otherwise...
return nullptr;
}
//------------------------------------------------------------------------
// IsLocalAddrExpr: finds if "this" is an address of a local var/fld.
//
// Arguments:
// comp - a compiler instance;
// pLclVarTree - [out] sets to the node indicating the local variable if found;
// pFldSeq - [out] sets to the field sequence representing the field, else null;
// pOffset - [out](optional) sets to the sum offset of the lcl/fld if found,
// note it does not include pLclVarTree->GetLclOffs().
//
// Returns:
// Returns true if "this" represents the address of a local, or a field of a local.
//
// Notes:
// It is mostly used for optimizations but assertion propagation depends on it for correctness.
// So if this function does not recognize a def of a LCL_VAR we can have an incorrect optimization.
//
bool GenTree::IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset /* = nullptr */)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(addrArg->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
else if (OperIsLocalAddr())
{
*pLclVarTree = this->AsLclVarCommon();
if (this->OperGet() == GT_LCL_FLD_ADDR)
{
*pFldSeq = comp->GetFieldSeqStore()->Append(this->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp1->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp2->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsImplicitByrefParameterValue: determine if this tree is the entire
// value of a local implicit byref parameter
//
// Arguments:
// compiler -- compiler instance
//
// Return Value:
// GenTreeLclVar node for the local, or nullptr.
//
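// Notes:
// Recognized shapes (TARGET_AMD64/TARGET_ARM64 only): LCL_VAR, OBJ(LCL_VAR), and
// OBJ(ADDR(LCL_VAR)), where the local must be an implicit byref parameter.
//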
GenTreeLclVar* GenTree::IsImplicitByrefParameterValue(Compiler* compiler)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
GenTreeLclVar* lcl = nullptr;
if (OperIs(GT_LCL_VAR))
{
lcl = AsLclVar();
}
else if (OperIs(GT_OBJ))
{
GenTree* addr = AsIndir()->Addr();
if (addr->OperIs(GT_LCL_VAR))
{
lcl = addr->AsLclVar();
}
else if (addr->OperIs(GT_ADDR))
{
GenTree* base = addr->AsOp()->gtOp1;
if (base->OperIs(GT_LCL_VAR))
{
lcl = base->AsLclVar();
}
}
}
if ((lcl != nullptr) && compiler->lvaIsImplicitByRefLocal(lcl->GetLclNum()))
{
return lcl;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return nullptr;
}
//------------------------------------------------------------------------
// IsLclVarUpdateTree: Determine whether this is an assignment tree of the
// form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar
//
// Arguments:
// pOtherTree - An "out" argument in which 'otherTree' will be returned.
// pOper - An "out" argument in which 'oper' will be returned.
//
// Return Value:
// If the tree is of the above form, the lclNum of the variable being
// updated is returned, and 'pOtherTree' and 'pOper' are set.
// Otherwise, returns BAD_VAR_NUM.
//
// Notes:
// 'otherTree' can have any shape.
// We avoid worrying about whether the op is commutative by only considering the
// first operand of the rhs. It is expected that most trees of this form will
// already have the lclVar on the lhs.
// TODO-CQ: Evaluate whether there are missed opportunities due to this, or
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
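// For example, for "ASG(V01, ADD(V01, V02))" this returns V01's lclNum, sets
// *pOtherTree to the V02 node, and sets *pOper to GT_ADD.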
unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIs(GT_ASG))
{
GenTree* lhs = AsOp()->gtOp1;
GenTree* rhs = AsOp()->gtOp2;
if ((lhs->OperGet() == GT_LCL_VAR) && rhs->OperIsBinary())
{
unsigned lhsLclNum = lhs->AsLclVarCommon()->GetLclNum();
GenTree* rhsOp1 = rhs->AsOp()->gtOp1;
GenTree* rhsOp2 = rhs->AsOp()->gtOp2;
// Some operators, such as HWINTRINSIC, are currently declared as binary but
// may not have two operands. We must check that both operands actually exist.
if ((rhsOp1 != nullptr) && (rhsOp2 != nullptr) && (rhsOp1->OperGet() == GT_LCL_VAR) &&
(rhsOp1->AsLclVarCommon()->GetLclNum() == lhsLclNum))
{
lclNum = lhsLclNum;
*pOtherTree = rhsOp2;
*pOper = rhs->OperGet();
}
}
}
return lclNum;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// canBeContained: check whether this tree node may be a subcomponent of its parent for purposes
// of code generation.
//
// Return Value:
// True if it is possible to contain this node and false otherwise.
//
bool GenTree::canBeContained() const
{
assert(OperIsLIR());
if (IsMultiRegLclVar())
{
return false;
}
if (gtHasReg(nullptr))
{
return false;
}
// It is not possible for nodes that do not produce values or that are not containable values to be contained.
if (!IsValue() || ((DebugOperKind() & DBK_NOCONTAIN) != 0) || (OperIsHWIntrinsic() && !isContainableHWIntrinsic()))
{
return false;
}
return true;
}
#endif // DEBUG
//------------------------------------------------------------------------
// isContained: check whether this tree node is a subcomponent of its parent for codegen purposes
//
// Return Value:
// Returns true if there is no code generated explicitly for this node.
// Essentially, it will be rolled into the code generation for the parent.
//
// Assumptions:
// This method relies upon the value of the GTF_CONTAINED flag.
// Therefore this method is only valid after Lowering.
// Also note that register allocation or other subsequent phases may cause
// nodes to become contained (or not) and therefore this property may change.
//
bool GenTree::isContained() const
{
assert(OperIsLIR());
const bool isMarkedContained = ((gtFlags & GTF_CONTAINED) != 0);
#ifdef DEBUG
if (!canBeContained())
{
assert(!isMarkedContained);
}
// these actually produce a register (the flags reg, we just don't model it)
// and are a separate instruction from the branch that consumes the result.
// They can only produce a result if the child is a SIMD equality comparison.
else if (OperIsCompare())
{
assert(isMarkedContained == false);
}
// if it's contained it can't be unused.
if (isMarkedContained)
{
assert(!IsUnusedValue());
}
#endif // DEBUG
return isMarkedContained;
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
{
return OperIsIndir() && isContained();
}
bool GenTree::isIndirAddrMode()
{
return OperIsIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
{
return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
bool GenTreeIndir::HasBase()
{
return Base() != nullptr;
}
bool GenTreeIndir::HasIndex()
{
return Index() != nullptr;
}
GenTree* GenTreeIndir::Base()
{
GenTree* addr = Addr();
if (isIndirAddrMode())
{
GenTree* result = addr->AsAddrMode()->Base();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
GenTree* GenTreeIndir::Index()
{
if (isIndirAddrMode())
{
GenTree* result = Addr()->AsAddrMode()->Index();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return nullptr;
}
}
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
{
return Addr()->AsAddrMode()->gtScale;
}
else
{
return 1;
}
}
ssize_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
{
return Addr()->AsAddrMode()->Offset();
}
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
{
return static_cast<ssize_t>(reinterpret_cast<intptr_t>(Addr()->AsClsVar()->gtClsVarHnd));
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
return Addr()->AsIntConCommon()->IconValue();
}
else
{
return 0;
}
}
//------------------------------------------------------------------------
// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value need a relocation recorded with the VM?
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && (gtOper == GT_CNS_INT) && IsIconHandle();
}
//------------------------------------------------------------------------
// ImmedValCanBeFolded: can this immediate value be folded for op?
//
// Arguments:
// comp - Compiler instance
// op - Tree operator
//
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
// (e.g., typeof(T) == typeof(int)).
return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE);
}
#ifdef TARGET_AMD64
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means whether an absolute indirect address can
// be encoded as a 32-bit offset relative to IP or zero.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
if (comp->opts.compReloc)
{
// During Ngen JIT is always asked to generate relocatable code.
// Hence JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
// During Jitting, we are allowed to generate non-relocatable code.
// On Amd64 we can encode an absolute indirect addr as an offset relative to zero or RIP.
// An absolute indir addr that can fit within 32 bits can be encoded as an offset relative
// to zero. All other absolute indir addr could be attempted to be encoded as RIP relative
// based on reloc hint provided by VM. RIP relative encoding is preferred over relative
// to zero, because the former is one byte smaller than the latter. For this reason
// we check for reloc hint first and then whether addr fits in 32-bits next.
//
// VM starts off with an initial state that allows both data and code addresses to be encoded as
// pc-relative offsets. Hence JIT will attempt to encode all absolute addresses as pc-relative
// offsets. It is possible that, while jitting a method, an address cannot be encoded as a
// pc-relative offset. In that case VM will note the overflow and will trigger re-jitting
// of the method with reloc hints turned off for all future methods. The second time around
// jitting will succeed since JIT will not attempt to encode data addresses as pc-relative
// offsets. Note that JIT will always attempt to relocate code addresses (e.g. call addresses).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
}
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
// During Ngen JIT is always asked to generate relocatable code.
// Hence JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4 bytes and can be directly encoded in an addr mode.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
return IsCnsIntOrI();
}
// Returns true if this icon value, when encoded as an address, needs a relocation recorded with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
// If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// IsFieldAddr: Is "this" a static or class field address?
//
// Recognizes the following patterns:
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pBaseAddr - [out] parameter for "the base address"
// pFldSeq - [out] parameter for the field sequence
//
// Return Value:
// If "this" matches patterns denoted above, and the FldSeq found is "full",
// i.e. starts with a class field or a static field, and includes all the
// struct fields that this tree represents the address of, this method will
// return "true" and set either "pBaseAddr" to some value, which must be used
// by the caller as the key into the "first field map" to obtain the actual
// value for the field. For instance fields, "base address" will be the object
// reference, for statics - the address to which the field offset with the
// field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
*pFldSeq = FieldSeqStore::NotAField();
GenTree* baseAddr = nullptr;
FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
if (OperIs(GT_ADD))
{
// If one operand has a field sequence, the other operand must not have one
// as the order of fields in that case would not be well-defined.
if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle())
{
assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp1;
}
else
{
return false;
}
assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL))
{
assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
fldSeq = AsIntCon()->gtFieldSeq;
baseAddr = this;
}
else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
{
baseAddr = this;
}
else
{
return false;
}
assert((fldSeq != nullptr) && (baseAddr != nullptr));
if ((fldSeq == FieldSeqStore::NotAField()) || fldSeq->IsPseudoField())
{
return false;
}
// The above screens out obviously invalid cases, but we have more checks to perform. The
// sequence returned from this method *must* start with either a class (NOT struct) field
// or a static field. To avoid the expense of calling "getFieldClass" here, we will instead
// rely on the invariant that TYP_REF base addresses can never appear for struct fields - we
// will effectively treat such cases ("possible" in unsafe code) as undefined behavior.
if (fldSeq->IsStaticField())
{
// For shared statics, we must encode the logical instantiation argument.
if (fldSeq->IsSharedStaticField())
{
*pBaseAddr = baseAddr;
}
*pFldSeq = fldSeq;
return true;
}
if (baseAddr->TypeIs(TYP_REF))
{
assert(!comp->eeIsValueClass(comp->info.compCompHnd->getFieldClass(fldSeq->GetFieldHandle())));
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
// This case is reached, for example, if we have a chain of struct fields that are based on
// some pointer. We do not model such cases because we do not model maps for ByrefExposed
// memory, as it does not have the non-aliasing property of GcHeap and reference types.
return false;
}
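//------------------------------------------------------------------------
// gtIsStaticFieldPtrToBoxedStruct: Returns true if a field access typed as TYP_REF
// ('fieldNodeType') refers to a field ('fldHnd') whose declared type is not itself
// a ref type, i.e. the reference points to the box holding a value-typed static field.
//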
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
if (fieldNodeType != TYP_REF)
{
return false;
}
noway_assert(fldHnd != nullptr);
CorInfoType cit = info.compCompHnd->getFieldType(fldHnd);
var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
// var_types - The simdType
// simdBaseJitType - The SIMD base JIT type we need
// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
noway_assert(m_simdHandleCache != nullptr);
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
break;
default:
break;
}
if (found)
{
isHWSIMD = false;
}
}
if (!found)
{
// We must still have isHWSIMD set to true, and the only non-HW types left are the fixed types.
switch (simdType)
{
case TYP_SIMD8:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
}
#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
default:
break;
}
break;
case TYP_SIMD12:
assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
}
#if defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector128NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector128NUIntHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
default:
break;
}
break;
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector256NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector256NUIntHandle);
break;
default:
break;
}
break;
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
default:
break;
}
}
unsigned size = genTypeSize(simdType);
if (isHWSIMD)
{
#if defined(FEATURE_HW_INTRINSICS)
return gtNewSimdZeroNode(simdType, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ false);
#else
JITDUMP("Coudn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
varTypeName(JitType2PreciseVarType(simdBaseJitType)));
return nullptr;
#endif // FEATURE_HW_INTRINSICS
}
else
{
return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
}
#endif // FEATURE_SIMD
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
switch (tree->gtOper)
{
default:
break;
case GT_MKREFANY:
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_BLK:
structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->AsCall()->gtRetClsHnd;
break;
case GT_RET_EXPR:
structHnd = tree->AsRetExpr()->gtRetClsHnd;
break;
case GT_ARGPLACE:
structHnd = tree->AsArgPlace()->gtArgPlaceClsHnd;
break;
case GT_INDEX:
structHnd = tree->AsIndex()->gtStructElemClass;
break;
case GT_FIELD:
info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
break;
case GT_ASG:
structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
break;
case GT_LCL_FLD:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
#endif
break;
case GT_LCL_VAR:
{
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
structHnd = lvaGetStruct(lclNum);
break;
}
case GT_RETURN:
structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
break;
case GT_IND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
else
#endif
{
// Attempt to find a handle for this expression.
// We can do this for an array element indirection, or for a field indirection.
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
structHnd = arrInfo.m_elemStructType;
}
else
{
GenTree* addr = tree->AsIndir()->Addr();
FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
else
{
GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
}
if (fieldSeq != nullptr)
{
fieldSeq = fieldSeq->GetTail();
if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
{
// Note we may have a primitive here (and correctly fail to obtain the handle)
eeGetFieldType(fieldSeq->GetFieldHandle(), &structHnd);
}
}
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
break;
}
// TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`,
// nowadays it won't work because the right part of an ASG could have struct type without a handle
// (check `fgMorphBlockOperand(isBlkReqd)`) and a few other cases.
}
return structHnd;
}
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Arguments:
// tree -- tree to find handle for
// pIsExact [out] -- whether handle is exact type
// pIsNonNull [out] -- whether tree value is known not to be null
//
// Return Value:
// nullptr if class handle is unknown,
// otherwise the class handle.
// *pIsExact set true if tree type is known to be exactly the handle type,
// otherwise actual type may be a subtype.
// *pIsNonNull set true if tree value is known not to be null,
// otherwise a null value is possible.
CORINFO_CLASS_HANDLE Compiler::gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull)
{
// Set default values for our out params.
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
// Bail out if we're just importing and not generating code, since
// the jit uses TYP_REF for CORINFO_TYPE_VAR locals and args, but
// these may not be ref types.
if (compIsForImportOnly())
{
return objClass;
}
// Bail out if the tree is not a ref type.
var_types treeType = tree->TypeGet();
if (treeType != TYP_REF)
{
return objClass;
}
// Tunnel through commas.
GenTree* obj = tree->gtEffectiveVal(false);
const genTreeOps objOp = obj->OperGet();
switch (objOp)
{
case GT_COMMA:
{
// gtEffectiveVal above means we shouldn't see commas here.
assert(!"unexpected GT_COMMA");
break;
}
case GT_LCL_VAR:
{
// For locals, pick up type info from the local table.
const unsigned objLcl = obj->AsLclVar()->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
break;
}
case GT_FIELD:
{
// For fields, get the type from the field handle.
CORINFO_FIELD_HANDLE fieldHnd = obj->AsField()->gtFldHnd;
if (fieldHnd != nullptr)
{
objClass = gtGetFieldClassHandle(fieldHnd, pIsExact, pIsNonNull);
}
break;
}
case GT_RET_EXPR:
{
// If we see a RET_EXPR, recurse through to examine the
// return value expression.
GenTree* retExpr = tree->AsRetExpr()->gtInlineCandidate;
objClass = gtGetClassHandle(retExpr, pIsExact, pIsNonNull);
break;
}
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
if ((ni == NI_System_Array_Clone) || (ni == NI_System_Object_MemberwiseClone))
{
objClass = gtGetClassHandle(call->gtCallThisArg->GetNode(), pIsExact, pIsNonNull);
break;
}
CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(call->gtCallMethHnd);
if (specialObjClass != nullptr)
{
objClass = specialObjClass;
*pIsExact = true;
*pIsNonNull = true;
break;
}
}
if (call->IsInlineCandidate())
{
// For inline candidates, we've already cached the return
// type class handle in the inline info.
InlineCandidateInfo* inlInfo = call->gtInlineCandidateInfo;
assert(inlInfo != nullptr);
// Grab it as our first cut at a return type.
assert(inlInfo->methInfo.args.retType == CORINFO_TYPE_CLASS);
objClass = inlInfo->methInfo.args.retTypeClass;
// If the method is shared, the above may not capture
// the most precise return type information (that is,
// it may represent a shared return type and as such,
// have instances of __Canon). See if we can use the
// context to get at something more definite.
//
// For now, we do this here on demand rather than when
// processing the call, but we could/should apply
// similar sharpening to the argument and local types
// of the inlinee.
const unsigned retClassFlags = info.compCompHnd->getClassAttribs(objClass);
if (retClassFlags & CORINFO_FLG_SHAREDINST)
{
CORINFO_CONTEXT_HANDLE context = inlInfo->exactContextHnd;
if (context != nullptr)
{
CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(context);
// Grab the signature in this context.
CORINFO_SIG_INFO sig;
eeGetMethodSig(call->gtCallMethHnd, &sig, exactClass);
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
}
else if (call->gtCallType == CT_USER_FUNC)
{
// For user calls, we can fetch the approximate return
// type info from the method handle. Unfortunately
// we've lost the exact context, so this is the best
// we can do for now.
CORINFO_METHOD_HANDLE method = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE exactClass = nullptr;
CORINFO_SIG_INFO sig;
eeGetMethodSig(method, &sig, exactClass);
if (sig.retType == CORINFO_TYPE_VOID)
{
// This is a constructor call.
const unsigned methodFlags = info.compCompHnd->getMethodAttribs(method);
assert((methodFlags & CORINFO_FLG_CONSTRUCTOR) != 0);
objClass = info.compCompHnd->getMethodClass(method);
*pIsExact = true;
*pIsNonNull = true;
}
else
{
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
else if (call->gtCallType == CT_HELPER)
{
objClass = gtGetHelperCallClassHandle(call, pIsExact, pIsNonNull);
}
break;
}
case GT_INTRINSIC:
{
GenTreeIntrinsic* intrinsic = obj->AsIntrinsic();
if (intrinsic->gtIntrinsicName == NI_System_Object_GetType)
{
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsExact = false;
*pIsNonNull = true;
}
break;
}
case GT_CNS_STR:
{
// For literal strings, we know the class and that the
// value is not null.
objClass = impGetStringClass();
*pIsExact = true;
*pIsNonNull = true;
break;
}
case GT_IND:
{
GenTreeIndir* indir = obj->AsIndir();
if (indir->HasBase() && !indir->HasIndex())
{
// indir(addr(lcl)) --> lcl
//
// This comes up during constrained callvirt on ref types.
GenTree* base = indir->Base();
GenTreeLclVarCommon* lcl = base->IsLocalAddrExpr();
if ((lcl != nullptr) && (base->OperGet() != GT_ADD))
{
const unsigned objLcl = lcl->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
}
else if (base->OperGet() == GT_ARR_ELEM)
{
// indir(arr_elem(...)) -> array element type
GenTree* array = base->AsArrElem()->gtArrObj;
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
}
else if (base->OperGet() == GT_ADD)
{
// TODO-VNTypes: use "IsFieldAddr" here instead.
// This could be a static field access.
//
// See if op1 is a static field base helper call
// and if so, op2 will have the field info.
GenTree* op1 = base->AsOp()->gtOp1;
GenTree* op2 = base->AsOp()->gtOp2;
const bool op1IsStaticFieldBase = gtIsStaticGCBaseHelperCall(op1);
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
if (fieldSeq != nullptr)
{
fieldSeq = fieldSeq->GetTail();
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->GetFieldHandle();
CORINFO_CLASS_HANDLE fieldClass = NO_CLASS_HANDLE;
var_types fieldType = eeGetFieldType(fieldHnd, &fieldClass);
assert(fieldType == TYP_REF);
objClass = fieldClass;
}
}
}
}
break;
}
case GT_BOX:
{
// Box should just wrap a local var reference which has
// the type we're looking for. Also box only represents a
// non-nullable value type so result cannot be null.
GenTreeBox* box = obj->AsBox();
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
objClass = lvaTable[boxTempLcl].lvClassHnd;
*pIsExact = lvaTable[boxTempLcl].lvClassIsExact;
*pIsNonNull = true;
break;
}
case GT_INDEX:
{
GenTree* array = obj->AsIndex()->Arr();
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
break;
}
default:
{
break;
}
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetHelperCallClassHandle: find class handle for return value of a
// helper call
//
// Arguments:
// call - helper call to examine
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if return value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull)
{
assert(call->gtCallType == CT_HELPER);
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL:
{
// Note for some runtimes these helpers return exact types.
//
// But in those cases the types are also sealed, so there's no
// need to claim exactness here.
const bool helperResultNonNull = (helper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsNonNull = helperResultNonNull;
break;
}
case CORINFO_HELP_CHKCASTCLASS:
case CORINFO_HELP_CHKCASTANY:
case CORINFO_HELP_CHKCASTARRAY:
case CORINFO_HELP_CHKCASTINTERFACE:
case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
case CORINFO_HELP_ISINSTANCEOFINTERFACE:
case CORINFO_HELP_ISINSTANCEOFARRAY:
case CORINFO_HELP_ISINSTANCEOFCLASS:
case CORINFO_HELP_ISINSTANCEOFANY:
{
// Fetch the class handle from the helper call arglist
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* typeArg = args->GetNode();
CORINFO_CLASS_HANDLE castHnd = gtGetHelperArgClassHandle(typeArg);
// We generally assume the type being cast to is the best type
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
// this might not be the best assumption. We could also
// explore calling something like mergeClasses to identify
// the more specific class. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
if (castHnd != nullptr)
{
DWORD attrs = info.compCompHnd->getClassAttribs(castHnd);
if ((attrs & CORINFO_FLG_INTERFACE) != 0)
{
castHnd = nullptr;
}
}
// If we don't have a good estimate for the type we can use the
// type from the value being cast instead.
if (castHnd == nullptr)
{
GenTree* valueArg = args->GetNext()->GetNode();
castHnd = gtGetClassHandle(valueArg, pIsExact, pIsNonNull);
}
// We don't know at jit time if the cast will succeed or fail, but if it
// fails at runtime then an exception is thrown for cast helpers, or the
// result is set null for instance helpers.
//
            // So it is safe to claim the result has the cast type.
// Note we don't know for sure that it is exactly this type.
if (castHnd != nullptr)
{
objClass = castHnd;
}
break;
}
case CORINFO_HELP_NEWARR_1_DIRECT:
case CORINFO_HELP_NEWARR_1_OBJ:
case CORINFO_HELP_NEWARR_1_VC:
case CORINFO_HELP_NEWARR_1_ALIGN8:
case CORINFO_HELP_READYTORUN_NEWARR_1:
{
CORINFO_CLASS_HANDLE arrayHnd = (CORINFO_CLASS_HANDLE)call->compileTimeHelperArgumentHandle;
if (arrayHnd != NO_CLASS_HANDLE)
{
objClass = arrayHnd;
*pIsExact = true;
*pIsNonNull = true;
}
break;
}
default:
break;
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetArrayElementClassHandle: find class handle for elements of an array
// of ref types
//
// Arguments:
// array -- array to find handle for
//
// Return Value:
// nullptr if element class handle is unknown, otherwise the class handle.
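//
// For example, an array known to be a string[] yields the class handle for System.String, while
// an int[] yields nullptr because its element type is not a ref class.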
CORINFO_CLASS_HANDLE Compiler::gtGetArrayElementClassHandle(GenTree* array)
{
bool isArrayExact = false;
bool isArrayNonNull = false;
CORINFO_CLASS_HANDLE arrayClassHnd = gtGetClassHandle(array, &isArrayExact, &isArrayNonNull);
if (arrayClassHnd != nullptr)
{
// We know the class of the reference
DWORD attribs = info.compCompHnd->getClassAttribs(arrayClassHnd);
if ((attribs & CORINFO_FLG_ARRAY) != 0)
{
// We know for sure it is an array
CORINFO_CLASS_HANDLE elemClassHnd = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayClassHnd, &elemClassHnd);
if (arrayElemType == CORINFO_TYPE_CLASS)
{
// We know it is an array of ref types
return elemClassHnd;
}
}
}
return nullptr;
}
//------------------------------------------------------------------------
// gtGetFieldClassHandle: find class handle for a field
//
// Arguments:
// fieldHnd - field handle for field in question
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if field value is not null
//
// Return Value:
//   nullptr if the field's type is not a ref class, or the class handle
// is unknown, otherwise the class handle.
//
// May examine runtime state of static field instances.
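//
// For example, an init-only static object field whose containing class has already been
// initialized can be reported by the runtime with its current class, letting loads of that
// field be typed exactly and treated as non-null.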
CORINFO_CLASS_HANDLE Compiler::gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull)
{
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
if (fieldCorType == CORINFO_TYPE_CLASS)
{
// Optionally, look at the actual type of the field's value
bool queryForCurrentClass = true;
INDEBUG(queryForCurrentClass = (JitConfig.JitQueryCurrentStaticFieldClass() > 0););
if (queryForCurrentClass)
{
#if DEBUG
const char* fieldClassName = nullptr;
const char* fieldName = eeGetFieldName(fieldHnd, &fieldClassName);
JITDUMP("Querying runtime about current class of field %s.%s (declared as %s)\n", fieldClassName, fieldName,
eeGetClassName(fieldClass));
#endif // DEBUG
// Is this a fully initialized init-only static field?
//
// Note we're not asking for speculative results here, yet.
CORINFO_CLASS_HANDLE currentClass = info.compCompHnd->getStaticFieldCurrentClass(fieldHnd);
if (currentClass != NO_CLASS_HANDLE)
{
// Yes! We know the class exactly and can rely on this to always be true.
fieldClass = currentClass;
*pIsExact = true;
*pIsNonNull = true;
JITDUMP("Runtime reports field is init-only and initialized and has class %s\n",
eeGetClassName(fieldClass));
}
else
{
JITDUMP("Field's current class not available\n");
}
}
}
return fieldClass;
}
//------------------------------------------------------------------------
// gtIsStaticGCBaseHelperCall: true if tree is fetching the gc static base
// for a subsequent static field access
//
// Arguments:
// tree - tree to consider
//
// Return Value:
// true if the tree is a suitable helper call
//
// Notes:
// Excludes R2R helpers as they specify the target field in a way
// that is opaque to the jit.
bool Compiler::gtIsStaticGCBaseHelperCall(GenTree* tree)
{
if (tree->OperGet() != GT_CALL)
{
return false;
}
GenTreeCall* call = tree->AsCall();
if (call->gtCallType != CT_HELPER)
{
return false;
}
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
// We are looking for a REF type so only need to check for the GC base helpers
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
return true;
default:
break;
}
return false;
}
void GenTree::ParseArrayAddress(
Compiler* comp, ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
*pArr = nullptr;
ValueNum inxVN = ValueNumStore::NoVN;
target_ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
if (*pArr == nullptr)
{
return;
}
    // OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
unsigned fieldOffsets = 0;
FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
if (fldSeqIter == FieldSeqStore::NotAField())
{
// TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
// See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\ b70992\ b70992.exe
// Safest thing to do here is to drop back to MinOpts
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->opts.optRepeat)
{
// We don't guarantee preserving these annotations through the entire optimizer, so
// just conservatively return null if under optRepeat.
*pArr = nullptr;
return;
}
#endif // DEBUG
noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
}
if (!FieldSeqStore::IsPseudoField(fldSeqIter->GetFieldHandleValue()))
{
if (*pFldSeq == nullptr)
{
*pFldSeq = fldSeqIter;
}
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->GetFieldHandle() != NO_FIELD_HANDLE);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->GetFieldHandle(), &fldCls);
fieldOffsets += comp->compGetTypeSize(cit, fldCls);
}
fldSeqIter = fldSeqIter->GetNext();
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
if (!FitsIn<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) ||
!FitsIn<target_ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(nullptr, TYP_INT);
return;
}
// Otherwise...
target_ssize_t offsetAccountedFor = static_cast<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
target_ssize_t elemSize = static_cast<target_ssize_t>(arrayInfo->m_elemSize);
target_ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
target_ssize_t constInd = constIndOffset / elemSize;
ValueNumStore* vnStore = comp->GetValueNumStore();
if (inxVN == ValueNumStore::NoVN)
{
// Must be a constant index.
*pInxVN = vnStore->VNForPtrSizeIntCon(constInd);
}
else
{
//
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
//
// The value associated with the index value number (inxVN) is the offset into the array,
// which has been scaled by element size. We need to recover the array index from that offset
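        // For example, with 4-byte elements an offset VN of "i * 4" recovers an index VN of "i",
        // and a remaining constant byte offset of 8 contributes 2 to the constant index part.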
if (vnStore->IsVNConstant(inxVN))
{
target_ssize_t index = vnStore->CoercedConstantValue<target_ssize_t>(inxVN);
noway_assert(elemSize > 0 && ((index % elemSize) == 0));
*pInxVN = vnStore->VNForPtrSizeIntCon((index / elemSize) + constInd);
}
else
{
bool canFoldDiv = false;
// If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
                // One of the multiply operands is elemSize, so the resulting
// index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
*pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
*pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, VNFunc(GT_DIV), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
VNFunc vnFunc = VNFunc(GT_ADD);
*pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, vnFunc, *pInxVN, vnForConstInd);
}
}
}
}
void GenTree::ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
case GT_CNS_INT:
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, AsIntCon()->gtFieldSeq);
assert(!AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
*pOffset += (inputMul * (target_ssize_t)(AsIntCon()->gtIconVal));
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
if (OperGet() == GT_SUB)
{
inputMul = -inputMul;
}
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
case GT_MUL:
{
// If one op is a constant, continue parsing down.
target_ssize_t subMul = 0;
GenTree* nonConst = nullptr;
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
else
{
assert(!AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp1->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp2;
}
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
}
break;
case GT_LSH:
// If one op is a constant, continue parsing down.
if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
target_ssize_t shiftVal = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
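                // For example, an address term of the form "index << 2" recurses on "index" with an
                // effective multiplier of inputMul * 4.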
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
break;
case GT_COMMA:
// We don't care about exceptions for this purpose.
if (AsOp()->gtOp1->OperIs(GT_BOUNDS_CHECK) || AsOp()->gtOp1->IsNothingNode())
{
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
break;
default:
break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(gtVNPair);
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_MUL), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
*pInxVN = vn;
}
else
{
*pInxVN = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_ADD), *pInxVN, vn);
}
}
}
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
bool b = comp->GetArrayInfoMap()->Lookup(this, arrayInfo);
assert(b);
return true;
}
// Otherwise...
GenTree* addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
{
return false;
}
}
bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_ADD:
{
GenTree* arrAddr = nullptr;
GenTree* offset = nullptr;
if (AsOp()->gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp1;
offset = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp2;
offset = AsOp()->gtOp1;
}
else
{
return false;
}
if (!offset->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
case GT_ADDR:
{
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->OperGet() != GT_IND)
{
return false;
}
else
{
// The "Addr" node might be annotated with a zero-offset field sequence.
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, zeroOffsetFldSeq);
}
return addrArg->ParseArrayElemForm(comp, arrayInfo, pFldSeq);
}
}
default:
return false;
}
}
bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
case GT_ADD:
if (!AsOp()->gtOp1->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return AsOp()->gtOp2->ParseOffsetForm(comp, pFldSeq);
default:
return false;
}
}
void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
case GT_CNS_INT:
// If we got here, this is a contribution to the constant part of the index.
if (isConst)
{
AsIntCon()->gtFieldSeq =
comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
return;
case GT_LCL_VAR:
gtFlags |= GTF_VAR_ARR_INDEX;
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->LabelIndex(comp, isConst);
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
case GT_CAST:
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
case GT_ARR_LENGTH:
gtFlags |= GTF_ARRLEN_ARR_IDX;
return;
default:
            // For all other operators: if one operand is a constant, peel it off and continue
            // labeling through the other operand; otherwise, label both operands as
            // non-constant (variable) index contributions.
if (OperIsArithmetic() || OperIsCompare())
{
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
}
else if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
}
// Otherwise continue downward on both, labeling vars.
AsOp()->gtOp1->LabelIndex(comp, false);
AsOp()->gtOp2->LabelIndex(comp, false);
}
break;
}
}
// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
//
// static
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, FieldSeqNode::FieldKind::Instance);
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode::FieldKind fieldKind)
{
FieldSeqNode fsn(fieldHnd, nullptr, fieldKind);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
if (a == nullptr)
{
return b;
}
else if (a == NotAField())
{
return NotAField();
}
else if (b == nullptr)
{
return a;
}
    else if (b == NotAField())
    {
        return NotAField();
    }
    // Extremely special case for ConstantIndex pseudo-fields -- appending two consecutive such
    // fields collapses them to one.
else if (a->GetNext() == nullptr && a->GetFieldHandleValue() == ConstantIndexPseudoField &&
b->GetFieldHandleValue() == ConstantIndexPseudoField)
{
return b;
}
else
{
// We should never add a duplicate FieldSeqNode
assert(a != b);
FieldSeqNode* tmp = Append(a->GetNext(), b);
FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetKind());
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
}
// Static vars.
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind) : m_next(next)
{
uintptr_t handleValue = reinterpret_cast<uintptr_t>(fieldHnd);
assert((handleValue & FIELD_KIND_MASK) == 0);
m_fieldHandleAndKind = handleValue | static_cast<uintptr_t>(fieldKind);
if (!FieldSeqStore::IsPseudoField(fieldHnd) && (fieldHnd != NO_FIELD_HANDLE))
{
assert(JitTls::GetCompiler()->eeIsFieldStatic(fieldHnd) == IsStaticField());
}
else
{
// Use the default for pseudo-fields.
assert(fieldKind == FieldKind::Instance);
}
}
bool FieldSeqNode::IsFirstElemFieldSeq() const
{
return GetFieldHandleValue() == FieldSeqStore::FirstElemPseudoField;
}
bool FieldSeqNode::IsConstantIndexFieldSeq() const
{
return GetFieldHandleValue() == FieldSeqStore::ConstantIndexPseudoField;
}
bool FieldSeqNode::IsPseudoField() const
{
return (GetFieldHandleValue() == FieldSeqStore::FirstElemPseudoField) ||
(GetFieldHandleValue() == FieldSeqStore::ConstantIndexPseudoField);
}
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
//-------------------------------------------------------------------
// SetOpLclRelatedToSIMDIntrinsic: Determine if the tree has a local var that needs to be set
// as used by a SIMD intrinsic, and if so, set that local var appropriately.
//
// Arguments:
// op - The tree, to be an operand of a new GT_SIMD node, to check.
//
void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op)
{
if (op == nullptr)
{
return;
}
if (op->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(op);
}
else if (op->OperIs(GT_OBJ))
{
GenTree* addr = op->AsIndir()->Addr();
if (addr->OperIs(GT_ADDR))
{
GenTree* addrOp1 = addr->AsOp()->gtGetOp1();
if (addrOp1->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(addrOp1);
}
}
}
}
bool GenTree::isCommutativeSIMDIntrinsic()
{
assert(gtOper == GT_SIMD);
switch (AsSIMD()->GetSIMDIntrinsicId())
{
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
return true;
default:
return false;
}
}
void GenTreeMultiOp::ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount)
{
size_t oldOperandCount = GetOperandCount();
GenTree** oldOperands = GetOperandArray();
if (newOperandCount > oldOperandCount)
{
if (newOperandCount <= inlineOperandCount)
{
assert(oldOperandCount <= inlineOperandCount);
assert(oldOperands == inlineOperands);
}
else
{
// The most difficult case: we need to recreate the dynamic array.
assert(compiler != nullptr);
m_operands = compiler->getAllocator(CMK_ASTNode).allocate<GenTree*>(newOperandCount);
}
}
else
{
        // We are shrinking the array and may, in the process, switch to an inline representation.
        // We choose to do so for simplicity ("if a node has <= InlineOperandCount operands,
        // then it stores them inline"), but actually it may be more profitable to not do that:
        // it would save us a copy and a potential cache miss (though the latter seems unlikely).
if ((newOperandCount <= inlineOperandCount) && (oldOperands != inlineOperands))
{
m_operands = inlineOperands;
}
}
#ifdef DEBUG
for (size_t i = 0; i < newOperandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
SetOperandCount(newOperandCount);
}
/* static */ bool GenTreeMultiOp::OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2)
{
if (op1->GetOperandCount() != op2->GetOperandCount())
{
return false;
}
for (size_t i = 1; i <= op1->GetOperandCount(); i++)
{
if (!Compare(op1->Op(i), op2->Op(i)))
{
return false;
}
}
return true;
}
void GenTreeMultiOp::InitializeOperands(GenTree** operands, size_t operandCount)
{
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = operands[i];
gtFlags |= (operands[i]->gtFlags & GTF_ALL_EFFECT);
}
SetOperandCount(operandCount);
}
var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
{
CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(auxiliaryJitType);
}
var_types GenTreeJitIntrinsic::GetSimdBaseType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(simdBaseJitType);
}
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
if (GetSIMDIntrinsicId() == SIMDIntrinsicInitArray)
{
return true;
}
return false;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeSIMD::Equals(GenTreeSIMD* op1, GenTreeSIMD* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetSIMDIntrinsicId() == op2->GetSIMDIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool GenTree::isCommutativeHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isContainableHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_SSE_LoadAlignedVector128:
case NI_SSE_LoadScalarVector128:
case NI_SSE_LoadVector128:
case NI_SSE2_LoadAlignedVector128:
case NI_SSE2_LoadScalarVector128:
case NI_SSE2_LoadVector128:
case NI_AVX_LoadAlignedVector256:
case NI_AVX_LoadVector256:
case NI_AVX_ExtractVector128:
case NI_AVX2_ExtractVector128:
{
return true;
}
default:
{
return false;
}
}
#elif TARGET_ARM64
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_Vector64_get_Zero:
case NI_Vector128_get_Zero:
{
return true;
}
default:
{
return false;
}
}
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isRMWHWIntrinsic(Compiler* comp)
{
assert(gtOper == GT_HWINTRINSIC);
assert(comp != nullptr);
#if defined(TARGET_XARCH)
if (!comp->canUseVexEncoding())
{
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
}
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
// TODO-XArch-Cleanup: Move this switch block to be table driven.
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
case NI_FMA_MultiplyAdd:
case NI_FMA_MultiplyAddNegated:
case NI_FMA_MultiplyAddNegatedScalar:
case NI_FMA_MultiplyAddScalar:
case NI_FMA_MultiplyAddSubtract:
case NI_FMA_MultiplySubtract:
case NI_FMA_MultiplySubtractAdd:
case NI_FMA_MultiplySubtractNegated:
case NI_FMA_MultiplySubtractNegatedScalar:
case NI_FMA_MultiplySubtractScalar:
{
return true;
}
default:
{
return false;
}
}
#elif defined(TARGET_ARM64)
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
{
nodeBuilder.AddOperand(i, operands[i]);
SetOpLclRelatedToSIMDIntrinsic(operands[i]);
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
SetOpLclRelatedToSIMDIntrinsic(nodeBuilder.GetOperand(i));
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeGet() == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
#if defined(TARGET_XARCH)
if (varTypeIsFloating(simdBaseType))
{
// Abs(v) = v & ~new vector<T>(-0.0);
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
GenTree* bitMask = gtNewDconNode(-0.0, simdBaseType);
bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
GenTree* tmp;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
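            // The sequence below computes abs(x) as ConditionalSelect(x < 0, 0 - x, x): the mask
            // produced by the comparison selects the negated value for negative lanes and the
            // original value otherwise.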
// op1 = op1 < Zero
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// tmp = Zero - op1Dup1
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, tmp, op1Dup2)
return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
else if (varTypeIsLong(simdBaseType))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif
}
GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op1 != nullptr);
assert(op1->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
assert(op2 != nullptr);
if ((op == GT_LSH) || (op == GT_RSH) || (op == GT_RSZ))
{
assert(op2->TypeIs(TYP_INT));
}
else
{
assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
}
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_ADD:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Add;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Add;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Add;
}
else
{
intrinsic = NI_SSE2_Add;
}
break;
}
case GT_AND:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_And;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_And;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_And;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_And;
}
else
{
intrinsic = NI_SSE2_And;
}
break;
}
case GT_AND_NOT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_AndNot;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_AndNot;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_AndNot;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_AndNot;
}
else
{
intrinsic = NI_SSE2_AndNot;
}
// GT_AND_NOT expects `op1 & ~op2`, but xarch does `~op1 & op2`
std::swap(op1, op2);
break;
}
case GT_DIV:
{
// TODO-XARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Divide;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Divide;
}
else
{
intrinsic = NI_SSE2_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsByte(simdBaseType));
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
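            // For example, for 32-bit elements the mask is 31 (0x1F), so a requested shift of 33
            // is reduced to 33 & 31 == 1.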
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
16, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (op == GT_LSH)
{
intrinsic = NI_AVX2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AVX2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AVX2_ShiftRightLogical;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_SSE2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_SSE2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_SSE2_ShiftRightLogical;
}
break;
}
case GT_MUL:
{
GenTree** broadcastOp = nullptr;
if (varTypeIsArithmetic(op1))
{
broadcastOp = &op1;
}
else if (varTypeIsArithmetic(op2))
{
broadcastOp = &op2;
}
if (broadcastOp != nullptr)
{
*broadcastOp =
gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else
{
intrinsic = NI_SSE2_MultiplyLow;
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_MultiplyLow;
}
else
{
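                        // SSE2 has no 32x32->32 vector multiply: NI_SSE2_Multiply (pmuludq) multiplies
                        // only the even 32-bit lanes into 64-bit products. The sequence below therefore
                        // multiplies the odd lanes separately (after shifting them down), keeps the low
                        // 32 bits of each product, and re-interleaves the results with UnpackLow.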
// op1Dup = op1
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector multiply"));
// op2Dup = op2
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
                        // op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Multiply(op2.AsUInt32(), op1.AsUInt32()).AsInt32()
op2 = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op1 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = Sse2.UnpackLow(op1, op2)
intrinsic = NI_SSE2_UnpackLow;
}
break;
}
case TYP_FLOAT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE2_Multiply;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Or;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Or;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Or;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Or;
}
else
{
intrinsic = NI_SSE2_Or;
}
break;
}
case GT_SUB:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Subtract;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Subtract;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Subtract;
}
else
{
intrinsic = NI_SSE2_Subtract;
}
break;
}
case GT_XOR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Xor;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Xor;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Xor;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Xor;
}
else
{
intrinsic = NI_SSE2_Xor;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_ADD:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AddScalar : NI_AdvSimd_Arm64_Add;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_AddScalar;
}
else
{
intrinsic = NI_AdvSimd_Add;
}
break;
}
case GT_AND:
{
intrinsic = NI_AdvSimd_And;
break;
}
case GT_AND_NOT:
{
intrinsic = NI_AdvSimd_BitwiseClear;
break;
}
case GT_DIV:
{
// TODO-AARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_DivideScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
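            // For example, for 16-bit elements the mask is 15, so a requested shift of 20 is
            // reduced to 20 & 15 == 4.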
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogical;
}
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
if (op != GT_LSH)
{
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmeticScalar;
}
else
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogical;
}
}
break;
}
case GT_MUL:
{
assert(!varTypeIsLong(simdBaseType));
GenTree** scalarOp = nullptr;
if (varTypeIsArithmetic(op1))
{
// MultiplyByScalar requires the scalar op to be op2
std::swap(op1, op2);
scalarOp = &op2;
}
else if (varTypeIsArithmetic(op2))
{
scalarOp = &op2;
}
switch (JitType2PreciseVarType(simdBaseJitType))
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
*scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
intrinsic = NI_AdvSimd_Multiply;
break;
}
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, simdBaseJitType,
8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Arm64_Multiply;
}
if (simdSize == 8)
{
intrinsic = NI_AdvSimd_MultiplyScalar;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
intrinsic = NI_AdvSimd_Or;
break;
}
case GT_SUB:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_SubtractScalar : NI_AdvSimd_Arm64_Subtract;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_SubtractScalar;
}
else
{
intrinsic = NI_AdvSimd_Subtract;
}
break;
}
case GT_XOR:
{
intrinsic = NI_AdvSimd_Xor;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Ceiling;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Ceiling;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_CeilingScalar : NI_AdvSimd_Arm64_Ceiling;
}
else
{
intrinsic = NI_AdvSimd_Ceiling;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareEqual;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareEqual;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_CompareEqual;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// tmp = (op1 == op2) i.e. compare for equality as if op1 and op2 are vector of int
// op1 = tmp
// op2 = Shuffle(tmp, (2, 3, 0, 1))
// result = BitwiseAnd(op1, op2)
//
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
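                    // A 64-bit lane is equal only if both of its 32-bit halves compared equal, so
                    // ANDing each half's result with its swapped neighbor yields all-ones exactly
                    // when the whole long matched, and zero otherwise.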
GenTree* tmp =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for vector Equals"));
op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareEqual;
}
break;
}
case GT_GE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareGreaterThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = GreaterThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_GT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports > for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
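                // For example, comparing the byte values 200 and 100: after subtracting 0x80 they
                // become 72 and -28 as signed bytes, and 72 > -28 preserves the unsigned ordering.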
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector GreaterThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareGreaterThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareGreaterThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
                    // Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit
                    // integers that comprise the longs op1 and op2.
//
// GreaterThan(op1, op2) can be expressed in terms of > relationship between 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 > op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 > op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 > x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 > y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 > y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector GreaterThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareLessThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = LessThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_LT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports < for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
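                // For example, comparing the byte values 100 and 200: after subtracting 0x80 they
                // become -28 and 72 as signed bytes, and -28 < 72 preserves the unsigned ordering.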
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector LessThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareLessThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareLessThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers that comprise
// the longs op1 and op2.
//
// LessThan(op1, op2) can be expressed in terms of < relationships between the 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) < (x2, y2)
// = (x1 < x2) || [(x1 == x2) && (y1 < y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 < op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 < op2Dup2) - 32-bit unsigned comparison (the low halves carry no sign of their own)
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 < x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 < y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 < y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector LessThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareLessThan;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareEqual;
}
break;
}
case GT_GE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareGreaterThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThanOrEqual;
}
break;
}
case GT_GT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic =
(simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanScalar : NI_AdvSimd_Arm64_CompareGreaterThan;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareLessThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThanOrEqual;
}
break;
}
case GT_LT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanScalar : NI_AdvSimd_Arm64_CompareLessThan;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThan;
}
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
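// gtNewSimdCmpOpAllNode: Creates a TYP_BOOL node that is true only if the comparison
// 'op' holds for every element of op1 and op2. The ordering comparisons are implemented
// by checking the per-element comparison result against AllBitsSet.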
GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
}
else
{
intrinsic = NI_Vector128_op_Equality;
}
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
getAllBitsSet = NI_Vector256_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Equality : NI_Vector128_op_Equality;
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 8)
{
intrinsic = NI_Vector64_op_Equality;
getAllBitsSet = NI_Vector64_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
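// gtNewSimdCmpOpAnyNode: Creates a TYP_BOOL node that is true if the comparison 'op'
// holds for at least one element of op1 and op2. Comparisons other than NE are
// implemented by checking the per-element comparison result against Zero.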
GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
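// gtNewSimdCndSelNode: Creates a per-bit conditional select, (op2 & op1) | (op3 & ~op1),
// where op1 is the mask. On arm64 this maps directly to AdvSimd.BitwiseSelect.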
GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
assert(op3 != nullptr);
assert(op3->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
// TODO-XARCH-CQ: It's likely beneficial to have a dedicated CndSel node so we
// can special case when the condition is the result of various compare operations.
//
// When it is, the condition is AllBitsSet or Zero on a per-element basis and we
// could change this to be a Blend operation in lowering as an optimization.
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector conditional select"));
// op2 = op2 & op1
op2 = gtNewSimdBinOpNode(GT_AND, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op3 = op3 & ~op1Dup
op3 = gtNewSimdBinOpNode(GT_AND_NOT, type, op3, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op2 | op3
return gtNewSimdBinOpNode(GT_OR, type, op2, op3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
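// gtNewSimdCreateBroadcastNode: Creates a vector with every element initialized to op1,
// using the Vector64/128/256.Create intrinsic appropriate for the simd size.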
GenTree* Compiler::gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
// works on 32-bit x86 systems.
unreached();
}
#endif // TARGET_X86
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
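// gtNewSimdDotProdNode: Creates a node computing the dot product of op1 and op2,
// returning the scalar result (of the simd base type).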
GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsArithmetic(type));
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(JITtype2varType(simdBaseJitType) == type);
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
if (simdSize == 32)
{
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_Dot;
}
else
{
assert(((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) ||
compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_Vector128_Dot;
}
#elif defined(TARGET_ARM64)
assert(!varTypeIsLong(simdBaseType));
intrinsic = (simdSize == 8) ? NI_Vector64_Dot : NI_Vector128_Dot;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
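// gtNewSimdFloorNode: Creates a node computing the per-element floor of a
// floating-point vector.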
GenTree* Compiler::gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
intrinsic = NI_AVX_Floor;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Floor;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_FloorScalar : NI_AdvSimd_Arm64_Floor;
}
else
{
intrinsic = NI_AdvSimd_Floor;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
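// gtNewSimdGetElementNode: Creates a node that extracts the element at index op2 from
// vector op1, inserting a range check when the index is not a constant within bounds.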
GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
intrinsicId = NI_Vector256_GetElement;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
intrinsicId = NI_Vector64_GetElement;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
bool rangeCheckNeeded = !op2->OperIsConst();
if (!rangeCheckNeeded)
{
ssize_t imm8 = op2->AsIntCon()->IconValue();
rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
}
if (rangeCheckNeeded)
{
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
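// gtNewSimdMaxNode: Creates a node computing the per-element maximum of op1 and op2,
// falling back to compare + conditional-select when no direct instruction is available.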
GenTree* Compiler::gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Max;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Max;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
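// SSE2 only provides integer Max for unsigned byte and signed short elements.
// Signed byte and unsigned short operands are biased into the supported domain,
// the Max is computed there, and the bias is then undone on the result.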
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Max(op1, op2)
op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Max;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Max;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_Max;
}
else
{
intrinsic = NI_AdvSimd_Max;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max"));
// op1 = op1 > op2
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
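// gtNewSimdMinNode: Creates a node computing the per-element minimum of op1 and op2,
// falling back to compare + conditional-select when no direct instruction is available.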
GenTree* Compiler::gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Min;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Min;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
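// SSE2 only provides integer Min for unsigned byte and signed short elements.
// Signed byte and unsigned short operands are biased into the supported domain,
// the Min is computed there, and the bias is then undone on the result.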
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Min(op1, op2)
op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Min;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Min;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MinScalar : NI_AdvSimd_Arm64_Min;
}
else
{
intrinsic = NI_AdvSimd_Min;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min"));
// op1 = op1 < op2
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
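// gtNewSimdNarrowNode: Creates a node that narrows the elements of op1 and op2 to
// elements of half the width and concatenates them into a single vector of the same
// overall size (op1 supplying the lower half of the result and op2 the upper half).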
GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
GenTree* tmp1;
GenTree* tmp2;
#if defined(TARGET_XARCH)
GenTree* tmp3;
GenTree* tmp4;
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// This is the same in principle as the other comments below; however, due to
// code formatting, it is too long to reasonably display here.
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U | 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8L, 8U, 9L, 9U, AL, AU, BL, BU | CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, -- | 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, -- | CL, --, DL, --, EL, --, FL, --
// tmp4 = Elements 0L, 1L, 2L, 3L, 8L, 9L, AL, BL | 4L, 5L, 6L, 7L, CL, DL, EL, FL
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L | 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector256.Create(0x0000FFFF).AsInt16();
// var tmp2 = Avx2.And(op1.AsInt16(), tmp1);
// var tmp3 = Avx2.And(op2.AsInt16(), tmp1);
// var tmp4 = Avx2.PackUnsignedSaturate(tmp2, tmp3);
// return Avx2.Permute4x64(tmp4.AsUInt64(), SHUFFLE_WYZX).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0, 1 | 2, 3; 0L, 0U, 1L, 1U | 2L, 2U, 3L, 3U
// op2 = Elements 4, 5 | 6, 7; 4L, 4U, 5L, 5U | 6L, 6U, 7L, 7U
//
// tmp1 = Elements 0L, 4L, 0U, 4U | 2L, 6L, 2U, 6U
// tmp2 = Elements 1L, 5L, 1U, 5U | 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 1L, 4L, 5L | 2L, 3L, 6L, 7L
// return Elements 0L, 1L, 2L, 3L | 4L, 5L, 6L, 7L
//
// var tmp1 = Avx2.UnpackLow(op1, op2);
// var tmp2 = Avx2.UnpackHigh(op1, op2);
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
opBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1 | 2, 3
// op2 = Elements 4, 5 | 6, 7
//
// tmp1 = Elements 0, 1, 2, 3 | -, -, -, -
// tmp2 = Elements 4, 5, 6, 7
// return Elements 0, 1, 2, 3 | 4, 5, 6, 7
//
// var tmp1 = Avx.ConvertToVector128Single(op1).ToVector256Unsafe();
// var tmp2 = Avx.ConvertToVector128Single(op2);
// return Avx.InsertVector128(tmp1, tmp2, 1);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, gtNewIconNode(1), NI_AVX_InsertVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
// op1 = Elements 0, 1, 2, 3, 4, 5, 6, 7; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U, 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8, 9, A, B, C, D, E, F; 8L, 8U, 9L, 9U, AL, AU, BL, BU, CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --, 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, --, CL, --, DL, --, EL, --, FL, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector128.Create((ushort)(0x00FF)).AsSByte();
// var tmp2 = Sse2.And(op1.AsSByte(), tmp1);
// var tmp3 = Sse2.And(op2.AsSByte(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
// op1 = Elements 0, 1, 2, 3; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --
// tmp3 = Elements 4L, --, 5L, --, 6L, --, 7L, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Vector128.Create(0x0000FFFF).AsInt16();
// var tmp2 = Sse2.And(op1.AsInt16(), tmp1);
// var tmp3 = Sse2.And(op2.AsInt16(), tmp1);
// return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate,
CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
}
else
{
// ...
//
// tmp1 = Elements 0L, 4L, 0U, 4U, 1L, 5L, 1U, 5U
// tmp2 = Elements 2L, 6L, 2U, 6U, 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 2L, 4L, 6L, 0U, 2U, 4U, 6U
// tmp4 = Elements 1L, 3L, 5L, 7L, 1U, 3U, 5U, 7U
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt16(), op2.AsUInt16());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt16(), op2.AsUInt16());
// var tmp3 = Sse2.UnpackLow(tmp1, tmp2);
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
GenTree* tmp2Dup;
tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp2 for vector narrow"));
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
}
case TYP_INT:
case TYP_UINT:
{
// op1 = Elements 0, 1; 0L, 0U, 1L, 1U
// op2 = Elements 2, 3; 2L, 2U, 3L, 3U
//
// tmp1 = Elements 0L, 2L, 0U, 2U
// tmp2 = Elements 1L, 3L, 1U, 3U
// return Elements 0L, 1L, 2L, 3L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt32(), op2.AsUInt32());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1
// op2 = Elements 2, 3
//
// tmp1 = Elements 0, 1, -, -
// tmp2 = Elements 2, 3, -, -
// return Elements 0, 1, 2, 3
//
// var tmp1 = Sse2.ConvertToVector128Single(op1);
// var tmp2 = Sse2.ConvertToVector128Single(op2);
// return Sse.MoveLowToHigh(tmp1, tmp2);
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
else if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1, op2);
// return AdvSimd.Arm64.ConvertToSingleLower(tmp2);
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1.AsUInt64(), 1, op2.AsUInt64()).As<T>(); - signed integers use Int64,
// unsigned integers use UInt64
// return AdvSimd.ExtractNarrowingLower(tmp2);
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
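// gtNewSimdSqrtNode: Creates a node computing the per-element square root of a
// floating-point vector.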
GenTree* Compiler::gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Sqrt;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Sqrt;
}
else
{
intrinsic = NI_SSE2_Sqrt;
}
#elif defined(TARGET_ARM64)
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_SqrtScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Sqrt;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
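// gtNewSimdSumNode: Creates a node that adds all elements of op1 together and returns
// the scalar sum, using horizontal/pairwise adds where the hardware provides them.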
GenTree* Compiler::gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp = nullptr;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
// HorizontalAdd combines pairs so we need log2(vectorLength) passes to sum all elements together.
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
if (simdSize == 32)
{
// Minus 1 because for the last pass we split the vector to low / high and add them together.
haddCount -= 1;
if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_HorizontalAdd;
}
}
else if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE3));
intrinsic = NI_SSE3_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSSE3));
intrinsic = NI_SSSE3_HorizontalAdd;
}
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
{
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 8)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_DOUBLE:
case TYP_LONG:
case TYP_ULONG:
{
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
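// gtNewSimdUnOpNode: Creates a per-element unary operation (GT_NEG or GT_NOT) over op1.
// Where no direct instruction exists, NEG is expanded as (Zero - op1) and NOT as
// (op1 ^ AllBitsSet).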
GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* op2 = nullptr;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_NEG:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
}
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// Zero - op1
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case GT_NOT:
{
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_get_AllBitsSet : NI_Vector128_get_AllBitsSet;
op2 = gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 ^ AllBitsSet
return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
{
if (varTypeIsSigned(simdBaseType))
{
if (simdBaseType == TYP_LONG)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else
{
intrinsic = NI_AdvSimd_Negate;
}
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
// Zero - op1
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
case GT_NOT:
{
return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
}
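// gtNewSimdWidenLowerNode: Creates a node that widens the lower half of op1's elements
// to elements of twice the width (sign- or zero-extending integers, converting float
// to double).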
GenTree* Compiler::gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 =
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE2_ConvertToVector128Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen lower"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
tmp1 = op1;
}
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
if (simdSize == 8)
{
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return tmp1;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
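// gtNewSimdWidenUpperNode: Creates a node that widens the upper half of op1's elements
// to elements of twice the width (sign- or zero-extending integers, converting float
// to double).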
GenTree* Compiler::gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(1), NI_AVX_ExtractVector128, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDoubleUpper;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningUpper;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningUpper;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
ssize_t index = 8 / genTypeSize(simdBaseType);
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
zero = gtNewSimdZeroNode(TYP_SIMD16, simdBaseJitType, 16, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
simdBaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
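//------------------------------------------------------------------------
// gtNewSimdWithElementNode: Creates a node that returns the vector 'op1' with the element at
//    the constant index 'op2' replaced by the value 'op3'.
//
// Arguments:
//    type                - The return type of the node
//    op1                 - The vector being updated
//    op2                 - The (constant) index of the element to replace
//    op3                 - The new value for the element
//    simdBaseJitType     - The base JIT type of the SIMD type
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created WithElement node.
//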
GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op2->IsCnsIntOrI());
ssize_t imm8 = op2->AsIntCon()->IconValue();
ssize_t count = simdSize / genTypeSize(simdBaseType);
assert((0 <= imm8) && (imm8 < count));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41_X64));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_WithElement;
}
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
if (simdSize == 8)
{
return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
break;
case TYP_FLOAT:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
break;
default:
unreached();
}
hwIntrinsicID = NI_AdvSimd_Insert;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
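//------------------------------------------------------------------------
// gtNewSimdZeroNode: Creates a node representing an all-zero vector of the given SIMD type.
//
// Arguments:
//    type                - The return type of the node
//    simdBaseJitType     - The base JIT type of the SIMD type
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created get_Zero node.
//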
GenTree* Compiler::gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
intrinsic = (simdSize == 32) ? NI_Vector256_get_Zero : NI_Vector128_get_Zero;
#elif defined(TARGET_ARM64)
intrinsic = (simdSize > 8) ? NI_Vector128_get_Zero : NI_Vector64_get_Zero;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
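//------------------------------------------------------------------------
// gtNewScalarHWIntrinsicNode: Creates a HWIntrinsic node for an intrinsic that produces a
//    scalar (non-SIMD) result. Overloads are provided for zero to three operands; each
//    operand is passed to SetOpLclRelatedToSIMDIntrinsic.
//
// Arguments:
//    type          - The return type of the node
//    op1..op3      - The operands of the intrinsic, if any
//    hwIntrinsicID - The intrinsic being created
//
// Return Value:
//    The created HWIntrinsic node.
//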
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2, op3);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoad() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
NamedIntrinsic intrinsicId = GetHWIntrinsicId();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
if (category == HW_Category_MemoryLoad)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryLoad(GetHWIntrinsicId()))
{
// Some intrinsics (without HW_Category_MemoryLoad) also have MemoryLoad semantics
// This is generally because they have both vector and pointer overloads, e.g.,
// * Vector128<byte> BroadcastScalarToVector128(Vector128<byte> value)
// * Vector128<byte> BroadcastScalarToVector128(byte* source)
        // So, we need to check whether the argument's type is a memory reference or a Vector128
if ((category == HW_Category_SimpleSIMD) || (category == HW_Category_SIMDScalar))
{
assert(GetOperandCount() == 1);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
CorInfoType auxiliaryType = GetAuxiliaryJitType();
if (auxiliaryType == CORINFO_TYPE_PTR)
{
return true;
}
assert(auxiliaryType == CORINFO_TYPE_UNDEF);
return false;
}
default:
{
unreached();
}
}
}
else if (category == HW_Category_IMM)
{
            // Do we have fewer than 3 operands?
if (GetOperandCount() < 3)
{
return false;
}
else if (HWIntrinsicInfo::isAVX2GatherIntrinsic(GetHWIntrinsicId()))
{
return true;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(GetHWIntrinsicId());
if (category == HW_Category_MemoryStore)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryStore(GetHWIntrinsicId()) &&
(category == HW_Category_IMM || category == HW_Category_Scalar))
{
// Some intrinsics (without HW_Category_MemoryStore) also have MemoryStore semantics
        // Bmi2/Bmi2.X64.MultiplyNoFlags may return the lower half of the result via an out argument
// unsafe ulong MultiplyNoFlags(ulong left, ulong right, ulong* low)
//
// So, the 3-argument form is MemoryStore
if (GetOperandCount() == 3)
{
switch (GetHWIntrinsicId())
{
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
return true;
default:
return false;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad or MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return OperIsMemoryLoad() || OperIsMemoryStore();
#else
return false;
#endif
}
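//------------------------------------------------------------------------
// GetHWIntrinsicId: Get the intrinsic id of this node.
//
// Return Value:
//    The named intrinsic id, after asserting that the node's operand count matches
//    the count expected for the intrinsic (when that count is known).
//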
NamedIntrinsic GenTreeHWIntrinsic::GetHWIntrinsicId() const
{
NamedIntrinsic id = gtHWIntrinsicId;
int numArgs = HWIntrinsicInfo::lookupNumArgs(id);
bool numArgsUnknown = numArgs < 0;
assert((static_cast<size_t>(numArgs) == GetOperandCount()) || numArgsUnknown);
return id;
}
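//------------------------------------------------------------------------
// SetHWIntrinsicId: Set the intrinsic id of this node.
//
// Arguments:
//    intrinsicId - The new intrinsic id
//
// Notes:
//    In DEBUG, asserts that the node's current operand count matches the count
//    expected for the new intrinsic (when that count is known).
//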
void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
#ifdef DEBUG
size_t oldOperandCount = GetOperandCount();
int newOperandCount = HWIntrinsicInfo::lookupNumArgs(intrinsicId);
bool newCountUnknown = newOperandCount < 0;
// We'll choose to trust the programmer here.
assert((oldOperandCount == static_cast<size_t>(newOperandCount)) || newCountUnknown);
#endif // DEBUG
gtHWIntrinsicId = intrinsicId;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeHWIntrinsic::Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetHWIntrinsicId() == op2->GetHWIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
(op1->GetAuxiliaryType() == op2->GetAuxiliaryType()) && (op1->GetOtherReg() == op2->GetOtherReg()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_HW_INTRINSICS
//---------------------------------------------------------------------------------------
// gtNewMustThrowException:
// create a node that calls into a JIT helper that always throws.
// The result would be a comma node: COMMA(jithelperthrow(void), x) where x's type should be specified.
//
// Arguments
// helper - JIT helper ID
// type - return type of the node
//
// Return Value
// pointer to the throw node
//
GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreeCall* node = gtNewHelperCallNode(helper, TYP_VOID);
node->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
if (type != TYP_VOID)
{
unsigned dummyTemp = lvaGrabTemp(true DEBUGARG("dummy temp of must thrown exception"));
if (type == TYP_STRUCT)
{
lvaSetStruct(dummyTemp, clsHnd, false);
type = lvaTable[dummyTemp].lvType; // struct type is normalized
}
else
{
lvaTable[dummyTemp].lvType = type;
}
GenTree* dummyNode = gtNewLclvNode(dummyTemp, type);
return gtNewOperNode(GT_COMMA, type, node, dummyNode);
}
return node;
}
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
//
// Return Value
// None
//
void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension callConv)
{
assert(!m_inited);
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
case Compiler::SPK_EnclosingType:
m_isEnclosingType = true;
FALLTHROUGH;
case Compiler::SPK_PrimitiveType:
{
assert(returnType != TYP_UNKNOWN);
assert(returnType != TYP_STRUCT);
m_regType[0] = returnType;
break;
}
case Compiler::SPK_ByValueAsHfa:
{
assert(varTypeIsStruct(returnType));
var_types hfaType = comp->GetHfaType(retClsHnd);
// We should have an hfa struct type
assert(varTypeIsValidHfaType(hfaType));
            // Note that the retail build issues a warning about a potential division by zero without this Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
}
if (comp->compFloatingPointUsed == false)
{
comp->compFloatingPointUsed = true;
}
break;
}
case Compiler::SPK_ByValue:
{
assert(varTypeIsStruct(returnType));
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
assert(structDesc.passedInRegisters);
for (int i = 0; i < structDesc.eightByteCount; i++)
{
assert(i < MAX_RET_REG_COUNT);
m_regType[i] = comp->GetEightByteType(structDesc, i);
}
#elif defined(TARGET_ARM64)
// a non-HFA struct returned using two registers
//
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#elif defined(TARGET_X86)
// an 8-byte struct returned using two registers
assert(structSize == 8);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#else // TARGET_XXX
// This target needs support here!
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
// We are returning using the return buffer argument
// There are no return registers
break;
default:
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
} // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
m_inited = true;
#endif
}
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
//
void ReturnTypeDesc::InitializeLongReturnType()
{
assert(!m_inited);
#if defined(TARGET_X86) || defined(TARGET_ARM)
    // Sets up a ReturnTypeDesc for returning a long using two registers
//
assert(MAX_RET_REG_COUNT >= 2);
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
#else // not (TARGET_X86 or TARGET_ARM)
m_regType[0] = TYP_LONG;
#endif // TARGET_X86 or TARGET_ARM
#ifdef DEBUG
m_inited = true;
#endif
}
//-------------------------------------------------------------------
// GetABIReturnReg: Return i'th return register as per target ABI
//
// Arguments:
// idx - Index of the return register.
// The first return register has an index of 0 and so on.
//
// Return Value:
// Returns i'th return register as per target ABI.
//
// Notes:
// x86 and ARM return long in multiple registers.
// ARM and ARM64 return HFA struct in multiple registers.
//
regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) const
{
unsigned count = GetReturnRegCount();
assert(idx < count);
regNumber resultReg = REG_NA;
#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET;
}
else
{
noway_assert(varTypeUsesFloatReg(regType0));
resultReg = REG_FLOATRET;
}
}
else if (idx == 1)
{
var_types regType1 = GetReturnRegType(1);
if (varTypeIsIntegralOrI(regType1))
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET_1;
}
else
{
resultReg = REG_INTRET;
}
}
else
{
noway_assert(varTypeUsesFloatReg(regType1));
if (varTypeUsesFloatReg(regType0))
{
resultReg = REG_FLOATRET_1;
}
else
{
resultReg = REG_FLOATRET;
}
}
}
#elif defined(TARGET_X86)
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
#elif defined(TARGET_ARM)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
// Ints are returned in one return register.
// Longs are returned in two return registers.
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
}
else
{
// Floats are returned in one return register (f0).
// Doubles are returned in one return register (d0).
// Structs are returned in four registers with HFAs.
assert(idx < MAX_RET_REG_COUNT); // Up to 4 return registers for HFA's
if (regType == TYP_DOUBLE)
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx * 2); // d0, d1, d2 or d3
}
else
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // f0, f1, f2 or f3
}
}
#elif defined(TARGET_ARM64)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
noway_assert(idx < 4); // Up to 4 return registers for HFA's
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
assert(resultReg != REG_NA);
return resultReg;
}
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
// Arguments:
// None
//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
// Note:
// This routine can be used when the caller is not particular about the order
// of return registers and wants to know the set of return registers.
//
// static
regMaskTP ReturnTypeDesc::GetABIReturnRegs() const
{
regMaskTP resultMask = RBM_NONE;
unsigned count = GetReturnRegCount();
for (unsigned i = 0; i < count; ++i)
{
resultMask |= genRegMask(GetABIReturnReg(i));
}
return resultMask;
}
//------------------------------------------------------------------------
// The following functions manage the gtRsvdRegs set of temporary registers
// created by LSRA during code generation.
//------------------------------------------------------------------------
// AvailableTempRegCount: return the number of available temporary registers in the (optional) given set
// (typically, RBM_ALLINT or RBM_ALLFLOAT).
//
// Arguments:
// mask - (optional) Check for available temporary registers only in this set.
//
// Return Value:
// Count of available temporary registers in given set.
//
unsigned GenTree::AvailableTempRegCount(regMaskTP mask /* = (regMaskTP)-1 */) const
{
return genCountBits(gtRsvdRegs & mask);
}
//------------------------------------------------------------------------
// GetSingleTempReg: There is expected to be exactly one available temporary register
// in the given mask in the gtRsvdRegs set. Get that register. No future calls to get
// a temporary register are expected. Removes the register from the set, but only in
// DEBUG to avoid doing unnecessary work in non-DEBUG builds.
//
// Arguments:
// mask - (optional) Get an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::GetSingleTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) == 1);
regNumber tempReg = genRegNumFromMask(availableSet);
INDEBUG(gtRsvdRegs &= ~availableSet;) // Remove the register from the set, so it can't be used again.
return tempReg;
}
//------------------------------------------------------------------------
// ExtractTempReg: Find the lowest number temporary register from the gtRsvdRegs set
// that is also in the optional given mask (typically, RBM_ALLINT or RBM_ALLFLOAT),
// and return it. Remove this register from the temporary register set, so it won't
// be returned again.
//
// Arguments:
// mask - (optional) Extract an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::ExtractTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) >= 1);
regMaskTP tempRegMask = genFindLowestBit(availableSet);
gtRsvdRegs &= ~tempRegMask;
return genRegNumFromMask(tempRegMask);
}
//------------------------------------------------------------------------
// GetLclOffs: if `this` is a field or a field address, it returns the offset
// of the field inside the struct; otherwise it returns 0.
//
// Return Value:
// The offset value.
//
uint16_t GenTreeLclVarCommon::GetLclOffs() const
{
if (OperIsLocalField())
{
return AsLclFld()->GetLclOffs();
}
else
{
return 0;
}
}
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GetResultOpNumForFMA: check if the result is written into one of the operands.
// In the case that none of the operands is overwritten, check if any of them is a last use.
//
// Return Value:
// The operand number overwritten or lastUse. 0 is the default value, where the result is written into
// a destination that is not one of the source operands and there is no last use op.
//
unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3)
{
// only FMA intrinsic node should call into this function
assert(HWIntrinsicInfo::lookupIsa(gtHWIntrinsicId) == InstructionSet_FMA);
if (use != nullptr && use->OperIs(GT_STORE_LCL_VAR))
{
// For store_lcl_var, check if any op is overwritten
GenTreeLclVarCommon* overwritten = use->AsLclVarCommon();
unsigned overwrittenLclNum = overwritten->GetLclNum();
if (op1->IsLocal() && op1->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 1;
}
else if (op2->IsLocal() && op2->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 2;
}
else if (op3->IsLocal() && op3->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 3;
}
}
// If no overwritten op, check if there is any last use op
// https://github.com/dotnet/runtime/issues/62215
if (op1->OperIs(GT_LCL_VAR) && op1->IsLastUse(0))
return 1;
else if (op2->OperIs(GT_LCL_VAR) && op2->IsLastUse(0))
return 2;
else if (op3->OperIs(GT_LCL_VAR) && op3->IsLastUse(0))
return 3;
return 0;
}
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// IsOffsetMisaligned: check if the field needs special handling on arm.
//
// Return Value:
// true if it is a float field with a misaligned offset, false otherwise.
//
bool GenTreeLclFld::IsOffsetMisaligned() const
{
if (varTypeIsFloating(gtType))
{
return ((m_lclOffs % emitTypeSize(TYP_FLOAT)) != 0);
}
return false;
}
#endif // TARGET_ARM
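//------------------------------------------------------------------------
// IsInvariant: Check if this tree is invariant: a constant or the address of a local.
//
// Return Value:
//    true if the tree is invariant; otherwise, false.
//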
bool GenTree::IsInvariant() const
{
return OperIsConst() || Compiler::impIsAddressInLocal(this);
}
//------------------------------------------------------------------------
// IsNeverNegative: returns true if the given tree is known to be never
// negative, i.e. the sign bit will always be zero.
// Only valid for integral types.
//
// Arguments:
// comp - Compiler object, needed for IntegralRange::ForNode
//
// Return Value:
// true if the given tree is known to be never negative
//
bool GenTree::IsNeverNegative(Compiler* comp) const
{
assert(varTypeIsIntegral(this));
if (IsIntegralConst())
{
return AsIntConCommon()->IntegralValue() >= 0;
}
// TODO-Casts: extend IntegralRange to handle constants
return IntegralRange::ForNode((GenTree*)this, comp).IsPositive();
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#include "hwintrinsic.h"
#include "simd.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
/*****************************************************************************/
const unsigned char GenTree::gtOperKindTable[] = {
#define GTNODE(en, st, cm, ok) ((ok)&GTK_MASK) + GTK_COMMUTE *cm,
#include "gtlist.h"
};
#ifdef DEBUG
const GenTreeDebugOperKind GenTree::gtDebugOperKindTable[] = {
#define GTNODE(en, st, cm, ok) static_cast<GenTreeDebugOperKind>((ok)&DBK_MASK),
#include "gtlist.h"
};
#endif // DEBUG
/*****************************************************************************
*
* The types of different GenTree nodes
*/
#ifdef DEBUG
#define INDENT_SIZE 3
//--------------------------------------------
//
// IndentStack: This struct is used, along with its related enums and strings,
// to control both the indentation and the printing of arcs.
//
// Notes:
// The mode of printing is set in the Constructor, using its 'compiler' argument.
// Currently it only prints arcs when fgOrder == fgOrderLinear.
// The type of arc to print is specified by the IndentInfo enum, and is controlled
// by the caller of the Push() method.
enum IndentChars
{
ICVertical,
ICBottom,
ICTop,
ICMiddle,
ICDash,
ICTerminal,
ICError,
IndentCharCount
};
// clang-format off
// Sets of strings for different dumping options vert bot top mid dash embedded terminal error
static const char* emptyIndents[IndentCharCount] = { " ", " ", " ", " ", " ", "", "?" };
static const char* asciiIndents[IndentCharCount] = { "|", "\\", "/", "+", "-", "*", "?" };
static const char* unicodeIndents[IndentCharCount] = { "\xe2\x94\x82", "\xe2\x94\x94", "\xe2\x94\x8c", "\xe2\x94\x9c", "\xe2\x94\x80", "\xe2\x96\x8c", "?" };
// clang-format on
typedef ArrayStack<Compiler::IndentInfo> IndentInfoStack;
struct IndentStack
{
IndentInfoStack stack;
const char** indents;
// Constructor for IndentStack. Uses 'compiler' to determine the mode of printing.
IndentStack(Compiler* compiler) : stack(compiler->getAllocator(CMK_DebugOnly))
{
if (compiler->asciiTrees)
{
indents = asciiIndents;
}
else
{
indents = unicodeIndents;
}
}
// Return the depth of the current indentation.
unsigned Depth()
{
return stack.Height();
}
// Push a new indentation onto the stack, of the given type.
void Push(Compiler::IndentInfo info)
{
stack.Push(info);
}
// Pop the most recent indentation type off the stack.
Compiler::IndentInfo Pop()
{
return stack.Pop();
}
// Print the current indentation and arcs.
void print()
{
unsigned indentCount = Depth();
for (unsigned i = 0; i < indentCount; i++)
{
unsigned index = indentCount - 1 - i;
switch (stack.Top(index))
{
case Compiler::IndentInfo::IINone:
printf(" ");
break;
case Compiler::IndentInfo::IIArc:
if (index == 0)
{
printf("%s%s%s", indents[ICMiddle], indents[ICDash], indents[ICDash]);
}
else
{
printf("%s ", indents[ICVertical]);
}
break;
case Compiler::IndentInfo::IIArcBottom:
printf("%s%s%s", indents[ICBottom], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIArcTop:
printf("%s%s%s", indents[ICTop], indents[ICDash], indents[ICDash]);
break;
case Compiler::IndentInfo::IIError:
printf("%s%s%s", indents[ICError], indents[ICDash], indents[ICDash]);
break;
default:
unreached();
}
}
printf("%s", indents[ICTerminal]);
}
};
//------------------------------------------------------------------------
// printIndent: This is a static method which simply invokes the 'print'
// method on its 'indentStack' argument.
//
// Arguments:
// indentStack - specifies the information for the indentation & arcs to be printed
//
// Notes:
// This method exists to localize the checking for the case where indentStack is null.
static void printIndent(IndentStack* indentStack)
{
if (indentStack == nullptr)
{
return;
}
indentStack->print();
}
#endif
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* opNames[] = {
#define GTNODE(en, st, cm, ok) #en,
#include "gtlist.h"
};
const char* GenTree::OpName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opNames));
return opNames[op];
}
#endif
#if MEASURE_NODE_SIZE
static const char* opStructNames[] = {
#define GTNODE(en, st, cm, ok) #st,
#include "gtlist.h"
};
const char* GenTree::OpStructName(genTreeOps op)
{
assert((unsigned)op < ArrLen(opStructNames));
return opStructNames[op];
}
#endif
//
// We allocate tree nodes in 2 different sizes:
// - TREE_NODE_SZ_SMALL for most nodes
// - TREE_NODE_SZ_LARGE for the few nodes (such as calls) that have
// more fields and take up a lot more space.
//
/* GT_COUNT'th oper is overloaded as 'undefined oper', so allocate storage for GT_COUNT'th oper also */
/* static */
unsigned char GenTree::s_gtNodeSizes[GT_COUNT + 1];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
unsigned char GenTree::s_gtTrueSizes[GT_COUNT + 1]{
#define GTNODE(en, st, cm, ok) sizeof(st),
#include "gtlist.h"
};
#endif // NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
#if COUNT_AST_OPERS
unsigned GenTree::s_gtNodeCounts[GT_COUNT + 1] = {0};
#endif // COUNT_AST_OPERS
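//------------------------------------------------------------------------
// InitNodeSize: Initialize s_gtNodeSizes, the table that records whether each
//    oper is allocated as a small or a large node.
//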
/* static */
void GenTree::InitNodeSize()
{
/* Set all sizes to 'small' first */
for (unsigned op = 0; op <= GT_COUNT; op++)
{
GenTree::s_gtNodeSizes[op] = TREE_NODE_SZ_SMALL;
}
// Now set all of the appropriate entries to 'large'
CLANG_FORMAT_COMMENT_ANCHOR;
// clang-format off
if (GlobalJitOptions::compFeatureHfa
#if defined(UNIX_AMD64_ABI)
|| true
#endif // defined(UNIX_AMD64_ABI)
)
{
// On ARM32, ARM64 and System V for struct returning
// there is code that does GT_ASG-tree.CopyObj call.
// CopyObj is a large node and the GT_ASG is small, which triggers an exception.
GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE;
}
GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CAST] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FTN_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INDEX_ADDR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_BOUNDS_CHECK] = TREE_NODE_SZ_SMALL;
GenTree::s_gtNodeSizes[GT_ARR_ELEM] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ALLOCOBJ] = TREE_NODE_SZ_LARGE;
#if USE_HELPERS_FOR_INT_DIV
GenTree::s_gtNodeSizes[GT_DIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UDIV] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_MOD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_UMOD] = TREE_NODE_SZ_LARGE;
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
GenTree::s_gtNodeSizes[GT_PUTARG_STK] = TREE_NODE_SZ_LARGE;
#if FEATURE_ARG_SPLIT
GenTree::s_gtNodeSizes[GT_PUTARG_SPLIT] = TREE_NODE_SZ_LARGE;
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]);
// This list of assertions should come to contain all GenTree subtypes that are declared
// "small".
assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]);
assert(sizeof(GenTreeLclVar) <= GenTree::s_gtNodeSizes[GT_LCL_VAR]);
static_assert_no_msg(sizeof(GenTree) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVarCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeLclFld) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCC) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCast) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBox) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeField) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFieldList) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeColon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeCall) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeCmpXchg) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeFptrVal) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeQmark) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIntrinsic) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndexAddr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrLen) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeBoundsChk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArrElem) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrIndex) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeArrOffs) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeClsVar) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeArgPlace) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhiArg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAllocObj) <= TREE_NODE_SZ_LARGE); // *** large node
#ifndef FEATURE_PUT_STRUCT_ARG_STK
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_SMALL);
#else // FEATURE_PUT_STRUCT_ARG_STK
// TODO-Throughput: This should not need to be a large node. The object info should be
// obtained from the child node.
static_assert_no_msg(sizeof(GenTreePutArgStk) <= TREE_NODE_SZ_LARGE);
#if FEATURE_ARG_SPLIT
static_assert_no_msg(sizeof(GenTreePutArgSplit) <= TREE_NODE_SZ_LARGE);
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
#ifdef FEATURE_SIMD
static_assert_no_msg(sizeof(GenTreeSIMD) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
static_assert_no_msg(sizeof(GenTreeHWIntrinsic) <= TREE_NODE_SZ_SMALL);
#endif // FEATURE_HW_INTRINSICS
// clang-format on
}
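//------------------------------------------------------------------------
// GetNodeSize: Return the allocation size (small or large) used for this node's oper.
//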
size_t GenTree::GetNodeSize() const
{
return GenTree::s_gtNodeSizes[gtOper];
}
#ifdef DEBUG
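//------------------------------------------------------------------------
// IsNodeProperlySized: Check that the size required by this node's oper fits in
//    the allocation that was made for it (small or large, per its debug flags).
//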
bool GenTree::IsNodeProperlySized() const
{
size_t size;
if (gtDebugFlags & GTF_DEBUG_NODE_SMALL)
{
size = TREE_NODE_SZ_SMALL;
}
else
{
assert(gtDebugFlags & GTF_DEBUG_NODE_LARGE);
size = TREE_NODE_SZ_LARGE;
}
return GenTree::s_gtNodeSizes[gtOper] <= size;
}
#endif
//------------------------------------------------------------------------
// ReplaceWith: replace this with the src node. The source must be an isolated node
// and cannot be used after the replacement.
//
// Arguments:
// src - source tree, that replaces this.
// comp - the compiler instance to transfer annotations for arrays.
//
void GenTree::ReplaceWith(GenTree* src, Compiler* comp)
{
// The source may be big only if the target is also a big node
assert((gtDebugFlags & GTF_DEBUG_NODE_LARGE) || GenTree::s_gtNodeSizes[src->gtOper] == TREE_NODE_SZ_SMALL);
// The check is effective only if nodes have been already threaded.
assert((src->gtPrev == nullptr) && (src->gtNext == nullptr));
RecordOperBashing(OperGet(), src->OperGet()); // nop unless NODEBASH_STATS is enabled
GenTree* prev = gtPrev;
GenTree* next = gtNext;
// The VTable pointer is copied intentionally here
memcpy((void*)this, (void*)src, src->GetNodeSize());
this->gtPrev = prev;
this->gtNext = next;
#ifdef DEBUG
gtSeqNum = 0;
#endif
// Transfer any annotations.
if (src->OperGet() == GT_IND && src->gtFlags & GTF_IND_ARR_INDEX)
{
ArrayInfo arrInfo;
bool b = comp->GetArrayInfoMap()->Lookup(src, &arrInfo);
assert(b);
comp->GetArrayInfoMap()->Set(this, arrInfo);
}
DEBUG_DESTROY_NODE(src);
}
/*****************************************************************************
*
* When 'NODEBASH_STATS' is enabled in "jit.h" we record all instances of
* an existing GenTree node having its operator changed. This can be useful
* for two (related) things - to see what is being bashed (and what isn't),
* and to verify that the existing choices for what nodes are marked 'large'
* are reasonable (to minimize "wasted" space).
*
* And yes, the hash function / logic is simplistic, but it is conflict-free
* and transparent for what we need.
*/
#if NODEBASH_STATS
#define BASH_HASH_SIZE 211
inline unsigned hashme(genTreeOps op1, genTreeOps op2)
{
return ((op1 * 104729) ^ (op2 * 56569)) % BASH_HASH_SIZE;
}
struct BashHashDsc
{
unsigned __int32 bhFullHash; // the hash value (unique for all old->new pairs)
unsigned __int32 bhCount; // the same old->new bashings seen so far
unsigned __int8 bhOperOld; // original gtOper
unsigned __int8 bhOperNew; // new gtOper
};
static BashHashDsc BashHash[BASH_HASH_SIZE];
void GenTree::RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{
unsigned hash = hashme(operOld, operNew);
BashHashDsc* desc = BashHash + hash;
if (desc->bhFullHash != hash)
{
        noway_assert(desc->bhCount == 0); // if this ever fires, we need to fix the hash fn
desc->bhFullHash = hash;
}
desc->bhCount += 1;
desc->bhOperOld = operOld;
desc->bhOperNew = operNew;
}
void GenTree::ReportOperBashing(FILE* f)
{
unsigned total = 0;
fflush(f);
fprintf(f, "\n");
fprintf(f, "Bashed gtOper stats:\n");
fprintf(f, "\n");
fprintf(f, " Old operator New operator #bytes old->new Count\n");
fprintf(f, " ---------------------------------------------------------------\n");
for (unsigned h = 0; h < BASH_HASH_SIZE; h++)
{
unsigned count = BashHash[h].bhCount;
if (count == 0)
continue;
unsigned opOld = BashHash[h].bhOperOld;
unsigned opNew = BashHash[h].bhOperNew;
fprintf(f, " GT_%-13s -> GT_%-13s [size: %3u->%3u] %c %7u\n", OpName((genTreeOps)opOld),
OpName((genTreeOps)opNew), s_gtTrueSizes[opOld], s_gtTrueSizes[opNew],
(s_gtTrueSizes[opOld] < s_gtTrueSizes[opNew]) ? 'X' : ' ', count);
total += count;
}
fprintf(f, "\n");
fprintf(f, "Total bashings: %u\n", total);
fprintf(f, "\n");
fflush(f);
}
#endif // NODEBASH_STATS
/*****************************************************************************/
#if MEASURE_NODE_SIZE
void GenTree::DumpNodeSizes(FILE* fp)
{
// Dump the sizes of the various GenTree flavors
fprintf(fp, "Small tree node size = %zu bytes\n", TREE_NODE_SZ_SMALL);
fprintf(fp, "Large tree node size = %zu bytes\n", TREE_NODE_SZ_LARGE);
fprintf(fp, "\n");
// Verify that node sizes are set kosherly and dump sizes
for (unsigned op = GT_NONE + 1; op < GT_COUNT; op++)
{
unsigned needSize = s_gtTrueSizes[op];
unsigned nodeSize = s_gtNodeSizes[op];
const char* structNm = OpStructName((genTreeOps)op);
const char* operName = OpName((genTreeOps)op);
bool repeated = false;
// Have we seen this struct flavor before?
for (unsigned mop = GT_NONE + 1; mop < op; mop++)
{
if (strcmp(structNm, OpStructName((genTreeOps)mop)) == 0)
{
repeated = true;
break;
}
}
// Don't repeat the same GenTree flavor unless we have an error
if (!repeated || needSize > nodeSize)
{
unsigned sizeChar = '?';
if (nodeSize == TREE_NODE_SZ_SMALL)
sizeChar = 'S';
else if (nodeSize == TREE_NODE_SZ_LARGE)
sizeChar = 'L';
fprintf(fp, "GT_%-16s ... %-19s = %3u bytes (%c)", operName, structNm, needSize, sizeChar);
if (needSize > nodeSize)
{
fprintf(fp, " -- ERROR -- allocation is only %u bytes!", nodeSize);
}
else if (needSize <= TREE_NODE_SZ_SMALL && nodeSize == TREE_NODE_SZ_LARGE)
{
fprintf(fp, " ... could be small");
}
fprintf(fp, "\n");
}
}
}
#endif // MEASURE_NODE_SIZE
/*****************************************************************************
*
* Walk all basic blocks and call the given function pointer for all tree
* nodes contained therein.
*/
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
}
}
//-----------------------------------------------------------
// CopyReg: Copy the _gtRegNum/gtRegTag fields.
//
// Arguments:
// from - GenTree node from which to copy
//
// Return Value:
// None
void GenTree::CopyReg(GenTree* from)
{
_gtRegNum = from->_gtRegNum;
INDEBUG(gtRegTag = from->gtRegTag;)
// Also copy multi-reg state if this is a call node
if (IsCall())
{
assert(from->IsCall());
this->AsCall()->CopyOtherRegs(from->AsCall());
}
else if (IsCopyOrReload())
{
this->AsCopyOrReload()->CopyOtherRegs(from->AsCopyOrReload());
}
}
//------------------------------------------------------------------
// gtHasReg: Whether the node has been assigned a register by LSRA
//
// Arguments:
// comp - Compiler instance. Required for multi-reg lcl var; ignored otherwise.
//
// Return Value:
// Returns true if the node was assigned a register.
//
// In case of multi-reg call nodes, it is considered to have a reg if regs are allocated for ALL its
// return values.
// REVIEW: why is this ALL and the other cases are ANY? Explain.
//
// In case of GT_COPY or GT_RELOAD of a multi-reg call, GT_COPY/GT_RELOAD is considered to have a reg if it
// has a reg assigned to ANY of its positions.
//
// In case of multi-reg local vars, it is considered to have a reg if it has a reg assigned for ANY
// of its positions.
//
bool GenTree::gtHasReg(Compiler* comp) const
{
bool hasReg = false;
if (IsMultiRegCall())
{
const GenTreeCall* call = AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A Multi-reg call node is said to have regs, if it has
// reg assigned to each of its result registers.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (call->GetRegNumByIdx(i) != REG_NA);
if (!hasReg)
{
break;
}
}
}
else if (IsCopyOrReloadOfMultiRegCall())
{
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
// A Multi-reg copy or reload node is said to have regs,
// if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; ++i)
{
hasReg = (copyOrReload->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else if (IsMultiRegLclVar())
{
assert(comp != nullptr);
const GenTreeLclVar* lclNode = AsLclVar();
const unsigned regCount = GetMultiRegCount(comp);
        // A multi-reg local var is said to have regs,
// if it has valid regs in any of the positions.
for (unsigned i = 0; i < regCount; i++)
{
hasReg = (lclNode->GetRegNumByIdx(i) != REG_NA);
if (hasReg)
{
break;
}
}
}
else
{
hasReg = (GetRegNum() != REG_NA);
}
return hasReg;
}
//-----------------------------------------------------------------------------
// GetRegisterDstCount: Get the number of registers defined by the node.
//
// Arguments:
// None
//
// Return Value:
// The number of registers that this node defines.
//
// Notes:
// This should not be called on a contained node.
// This does not look at the actual register assignments, if any, and so
// is valid after Lowering.
//
int GenTree::GetRegisterDstCount(Compiler* compiler) const
{
assert(!isContained());
if (!IsMultiRegNode())
{
return (IsValue()) ? 1 : 0;
}
else if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
else if (IsCopyOrReload())
{
return gtGetOp1()->GetRegisterDstCount(compiler);
}
#if FEATURE_ARG_SPLIT
else if (OperIsPutArgSplit())
{
return (const_cast<GenTree*>(this))->AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
else if (OperIsMultiRegOp())
{
// A MultiRegOp is a GT_MUL_LONG, GT_PUTARG_REG, or GT_BITCAST.
// For the latter two (ARM-only), they only have multiple registers if they produce a long value
// (GT_MUL_LONG always produces a long value).
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
return (TypeGet() == TYP_LONG) ? 2 : 1;
#else
assert(OperIs(GT_MUL_LONG));
return 2;
#endif
}
#endif
#ifdef FEATURE_HW_INTRINSICS
else if (OperIsHWIntrinsic())
{
assert(TypeIs(TYP_STRUCT));
const GenTreeHWIntrinsic* intrinsic = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = intrinsic->GetHWIntrinsicId();
assert(HWIntrinsicInfo::IsMultiReg(intrinsicId));
return HWIntrinsicInfo::GetMultiRegCount(intrinsicId);
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetFieldCount(compiler);
}
assert(!"Unexpected multi-reg node");
return 0;
}
//-----------------------------------------------------------------------------------
// IsMultiRegNode: whether this node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi-reg node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
bool GenTree::IsMultiRegNode() const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return true;
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return true;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return true;
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return true;
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::IsMultiReg(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetMultiRegCount: Return the register count for a multi-reg node.
//
// Arguments:
// comp - Compiler instance. Required for MultiRegLclVar, unused otherwise.
//
// Return Value:
// Returns the number of registers defined by this node.
//
unsigned GenTree::GetMultiRegCount(Compiler* comp) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegCount();
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->gtNumRegs;
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegCount();
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegCount();
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIsHWIntrinsic())
{
return HWIntrinsicInfo::GetMultiRegCount(AsHWIntrinsic()->GetHWIntrinsicId());
}
#endif // FEATURE_HW_INTRINSICS
if (IsMultiRegLclVar())
{
assert(comp != nullptr);
return AsLclVar()->GetFieldCount(comp);
}
assert(!"GetMultiRegCount called with non-multireg node");
return 1;
}
//---------------------------------------------------------------
// gtGetContainedRegMask: Get the reg mask of the node including
// contained nodes (recursive).
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetContainedRegMask()
{
if (!isContained())
{
return gtGetRegMask();
}
regMaskTP mask = 0;
for (GenTree* operand : Operands())
{
mask |= operand->gtGetContainedRegMask();
}
return mask;
}
//---------------------------------------------------------------
// gtGetRegMask: Get the reg mask of the node.
//
// Arguments:
// None
//
// Return Value:
// Reg Mask of GenTree node.
//
regMaskTP GenTree::gtGetRegMask() const
{
regMaskTP resultMask;
if (IsMultiRegCall())
{
resultMask = genRegMask(GetRegNum());
resultMask |= AsCall()->GetOtherRegMask();
}
else if (IsCopyOrReloadOfMultiRegCall())
{
        // A multi-reg copy or reload will have valid regs for only those
// positions that need to be copied or reloaded. Hence we need
// to consider only those registers for computing reg mask.
const GenTreeCopyOrReload* copyOrReload = AsCopyOrReload();
const GenTreeCall* call = copyOrReload->gtGetOp1()->AsCall();
const unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = copyOrReload->GetRegNumByIdx(i);
if (reg != REG_NA)
{
resultMask |= genRegMask(reg);
}
}
}
#if FEATURE_ARG_SPLIT
else if (compFeatureArgSplit() && OperIsPutArgSplit())
{
const GenTreePutArgSplit* splitArg = AsPutArgSplit();
const unsigned regCount = splitArg->gtNumRegs;
resultMask = RBM_NONE;
for (unsigned i = 0; i < regCount; ++i)
{
regNumber reg = splitArg->GetRegNumByIdx(i);
assert(reg != REG_NA);
resultMask |= genRegMask(reg);
}
}
#endif // FEATURE_ARG_SPLIT
else
{
resultMask = genRegMask(GetRegNum());
}
return resultMask;
}
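//------------------------------------------------------------------------
// AddField: Append a new use (node, offset, type) to this field list and propagate
//    the node's side effect flags to the list. AddFieldLIR and InsertFieldLIR do the
//    same as AddField and InsertField but do not update the list's flags.
//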
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
m_uses.AddUse(new (compiler, CMK_ASTNode) Use(node, offset, type));
}
void GenTreeFieldList::InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
gtFlags |= node->gtFlags & GTF_ALL_EFFECT;
}
void GenTreeFieldList::InsertFieldLIR(
Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type)
{
m_uses.InsertUse(insertAfter, new (compiler, CMK_ASTNode) Use(node, offset, type));
}
//---------------------------------------------------------------
// GetOtherRegMask: Get the reg mask of gtOtherRegs of call node
//
// Arguments:
// None
//
// Return Value:
// Reg mask of gtOtherRegs of call node.
//
regMaskTP GenTreeCall::GetOtherRegMask() const
{
regMaskTP resultMask = RBM_NONE;
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
if (gtOtherRegs[i] != REG_NA)
{
resultMask |= genRegMask((regNumber)gtOtherRegs[i]);
continue;
}
break;
}
#endif
return resultMask;
}
//-------------------------------------------------------------------------
// IsPure:
// Returns true if this call is pure. For now, this uses the same
// definition of "pure" that is that used by HelperCallProperties: a
// pure call does not read or write any aliased (e.g. heap) memory or
// have other global side effects (e.g. class constructors, finalizers),
// but is allowed to throw an exception.
//
// NOTE: this call currently only returns true if the call target is a
// helper method that is known to be pure. No other analysis is
// performed.
//
// Arguments:
//    compiler - the compiler context.
//
// Returns:
// True if the call is pure; false otherwise.
//
bool GenTreeCall::IsPure(Compiler* compiler) const
{
return (gtCallType == CT_HELPER) &&
compiler->s_helperCallProperties.IsPure(compiler->eeGetHelperNum(gtCallMethHnd));
}
//-------------------------------------------------------------------------
// HasSideEffects:
// Returns true if this call has any side effects. All non-helpers are considered to have side-effects. Only helpers
// that do not mutate the heap, do not run constructors, may not throw, and are either a) pure or b) non-finalizing
// allocation functions are considered side-effect-free.
//
// Arguments:
// compiler - the compiler instance
// ignoreExceptions - when `true`, ignores exception side effects
// ignoreCctors - when `true`, ignores class constructor side effects
//
// Return Value:
// true if this call has any side-effects; false otherwise.
bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool ignoreCctors) const
{
// Generally all GT_CALL nodes are considered to have side-effects, but we may have extra information about helper
// calls that can prove them side-effect-free.
if (gtCallType != CT_HELPER)
{
return true;
}
CorInfoHelpFunc helper = compiler->eeGetHelperNum(gtCallMethHnd);
HelperCallProperties& helperProperties = compiler->s_helperCallProperties;
// We definitely care about the side effects if MutatesHeap is true
if (helperProperties.MutatesHeap(helper))
{
return true;
}
// Unless we have been instructed to ignore cctors (CSE, for example, ignores cctors), consider them side effects.
if (!ignoreCctors && helperProperties.MayRunCctor(helper))
{
return true;
}
// If we also care about exceptions then check if the helper can throw
if (!ignoreExceptions && !helperProperties.NoThrow(helper))
{
return true;
}
// If this is not a Pure helper call or an allocator (that will not need to run a finalizer)
// then this call has side effects.
return !helperProperties.IsPure(helper) &&
(!helperProperties.IsAllocator(helper) || ((gtCallMoreFlags & GTF_CALL_M_ALLOC_SIDE_EFFECTS) != 0));
}
//-------------------------------------------------------------------------
// HasNonStandardAddedArgs: Return true if the method has non-standard args added to the call
// argument list during argument morphing (fgMorphArgs), e.g., passed in R10 or R11 on AMD64.
// See also GetNonStandardAddedArgCount().
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// true if there are any such args, false otherwise.
//
bool GenTreeCall::HasNonStandardAddedArgs(Compiler* compiler) const
{
return GetNonStandardAddedArgCount(compiler) != 0;
}
//-------------------------------------------------------------------------
// GetNonStandardAddedArgCount: Get the count of non-standard arguments that have been added
// during call argument morphing (fgMorphArgs). Do not count non-standard args that are already
// counted in the argument list prior to morphing.
//
// This function is used to help map the caller and callee arguments during tail call setup.
//
// Arguments:
// compiler - the compiler instance
//
// Return Value:
// The count of args, as described.
//
// Notes:
// It would be more general to have fgMorphArgs set a bit on the call node when such
// args are added to a call, and a bit on each such arg, and then have this code loop
// over the call args when the special call bit is set, counting the args with the special
// arg bit. This seems pretty heavyweight, though. Instead, this logic needs to be kept
// in sync with fgMorphArgs.
//
int GenTreeCall::GetNonStandardAddedArgCount(Compiler* compiler) const
{
if (IsUnmanaged() && !compiler->opts.ShouldUsePInvokeHelpers())
{
// R11 = PInvoke cookie param
return 1;
}
else if (IsVirtualStub())
{
// R11 = Virtual stub param
return 1;
}
else if ((gtCallType == CT_INDIRECT) && (gtCallCookie != nullptr))
{
// R10 = PInvoke target param
// R11 = PInvoke cookie param
return 2;
}
return 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
// These two Jit Helpers that we handle here by returning true
// aren't actually defined to return a struct, so they don't expect
// their RetBuf to be passed in x8, instead they expect it in x0.
//
bool GenTreeCall::TreatAsHasRetBufArg(Compiler* compiler) const
{
if (HasRetBufArg())
{
return true;
}
else
{
// If we see a Jit helper call that returns a TYP_STRUCT we will
// transform it as if it has a Return Buffer Argument
//
if (IsHelperCall() && (gtReturnType == TYP_STRUCT))
{
// There are two possible helper calls that use this path:
// CORINFO_HELP_GETFIELDSTRUCT and CORINFO_HELP_UNBOX_NULLABLE
//
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(gtCallMethHnd);
if (helpFunc == CORINFO_HELP_GETFIELDSTRUCT)
{
return true;
}
else if (helpFunc == CORINFO_HELP_UNBOX_NULLABLE)
{
return true;
}
else
{
assert(!"Unexpected JIT helper in TreatAsHasRetBufArg");
}
}
}
return false;
}
//-------------------------------------------------------------------------
// IsHelperCall: Determine if this GT_CALL node is a specific helper call.
//
// Arguments:
// compiler - the compiler instance so that we can call eeFindHelper
//
// Return Value:
// Returns true if this GT_CALL node is a call to the specified helper.
//
bool GenTreeCall::IsHelperCall(Compiler* compiler, unsigned helper) const
{
return IsHelperCall(compiler->eeFindHelper(helper));
}
//------------------------------------------------------------------------
// GenTreeCall::ReplaceCallOperand:
// Replaces a given operand to a call node and updates the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTreeCall::ReplaceCallOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
GenTree* originalOperand = *useEdge;
*useEdge = replacement;
const bool isArgument =
(replacement != gtControlExpr) &&
((gtCallType != CT_INDIRECT) || ((replacement != gtCallCookie) && (replacement != gtCallAddr)));
if (isArgument)
{
if ((originalOperand->gtFlags & GTF_LATE_ARG) != 0)
{
replacement->gtFlags |= GTF_LATE_ARG;
}
else
{
assert((replacement->gtFlags & GTF_LATE_ARG) == 0);
fgArgTabEntry* fp = Compiler::gtArgEntryByNode(this, replacement);
assert(fp->GetNode() == replacement);
}
}
}
//-------------------------------------------------------------------------
// AreArgsComplete: Determine if this GT_CALL node's arguments have been processed.
//
// Return Value:
// Returns true if fgMorphArgs has processed the arguments.
//
bool GenTreeCall::AreArgsComplete() const
{
if (fgArgInfo == nullptr)
{
return false;
}
if (fgArgInfo->AreArgsComplete())
{
assert((gtCallLateArgs != nullptr) || !fgArgInfo->HasRegArgs());
return true;
}
#if defined(FEATURE_FASTTAILCALL)
// If we have FEATURE_FASTTAILCALL, 'fgCanFastTailCall()' can call 'fgInitArgInfo()', and in that
// scenario it is valid to have 'fgArgInfo' be non-null when 'fgMorphArgs()' first queries this,
// when it hasn't yet morphed the arguments.
#else
assert(gtCallArgs == nullptr);
#endif
return false;
}
//-------------------------------------------------------------------------
// SetLclRetBufArg: Sets the "return buffer" argument use.
//
void GenTreeCall::SetLclRetBufArg(Use* retBufArg)
{
assert(retBufArg->GetNode()->TypeIs(TYP_I_IMPL, TYP_BYREF) && retBufArg->GetNode()->OperIs(GT_ADDR, GT_ASG));
assert(HasRetBufArg());
gtRetBufArg = retBufArg;
}
//--------------------------------------------------------------------------
// Equals: Check if 2 CALL nodes are equal.
//
// Arguments:
// c1 - The first call node
// c2 - The second call node
//
// Return Value:
// true if the 2 CALL nodes have the same type and operands
//
bool GenTreeCall::Equals(GenTreeCall* c1, GenTreeCall* c2)
{
assert(c1->OperGet() == c2->OperGet());
if (c1->TypeGet() != c2->TypeGet())
{
return false;
}
if (c1->gtCallType != c2->gtCallType)
{
return false;
}
if (c1->gtCallType != CT_INDIRECT)
{
if (c1->gtCallMethHnd != c2->gtCallMethHnd)
{
return false;
}
#ifdef FEATURE_READYTORUN
if (c1->gtEntryPoint.addr != c2->gtEntryPoint.addr)
{
return false;
}
#endif
}
else
{
if (!Compare(c1->gtCallAddr, c2->gtCallAddr))
{
return false;
}
}
if ((c1->gtCallThisArg != nullptr) != (c2->gtCallThisArg != nullptr))
{
return false;
}
if ((c1->gtCallThisArg != nullptr) && !Compare(c1->gtCallThisArg->GetNode(), c2->gtCallThisArg->GetNode()))
{
return false;
}
GenTreeCall::UseIterator i1 = c1->Args().begin();
GenTreeCall::UseIterator end1 = c1->Args().end();
GenTreeCall::UseIterator i2 = c2->Args().begin();
GenTreeCall::UseIterator end2 = c2->Args().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
i1 = c1->LateArgs().begin();
end1 = c1->LateArgs().end();
i2 = c2->LateArgs().begin();
end2 = c2->LateArgs().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
if ((i1 != end1) || (i2 != end2))
{
return false;
}
if (!Compare(c1->gtControlExpr, c2->gtControlExpr))
{
return false;
}
return true;
}
//--------------------------------------------------------------------------
// ResetArgInfo: The argument info needs to be reset so it can be recomputed based on some change
// in conditions, such as changing the return type of a call due to giving up on doing a tailcall.
// If there is no fgArgInfo computed yet for this call, then there is nothing to reset.
//
void GenTreeCall::ResetArgInfo()
{
if (fgArgInfo == nullptr)
{
return;
}
// We would like to just set `fgArgInfo = nullptr`. But `fgInitArgInfo()` not
// only sets up fgArgInfo, it also adds non-standard args to the IR, and we need
// to remove that extra IR so it doesn't get added again.
//
unsigned argNum = 0;
if (gtCallThisArg != nullptr)
{
argNum++;
}
Use** link = >CallArgs;
while ((*link) != nullptr)
{
const fgArgTabEntry* entry = fgArgInfo->GetArgEntry(argNum);
if (entry->isNonStandard() && entry->isNonStandardArgAddedLate())
{
JITDUMP("Removing non-standarg arg %s [%06u] to prepare for re-morphing call [%06u]\n",
getNonStandardArgKindName(entry->nonStandardArgKind), Compiler::dspTreeID((*link)->GetNode()),
gtTreeID);
*link = (*link)->GetNext();
}
else
{
link = &(*link)->NextRef();
}
argNum++;
}
fgArgInfo = nullptr;
}
#if !defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned GenTreePutArgStk::GetStackByteSize() const
{
return genTypeSize(genActualType(gtOp1->gtType));
}
#endif // !defined(FEATURE_PUT_STRUCT_ARG_STK)
/*****************************************************************************
*
* Returns non-zero if the two trees are identical.
*/
bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
{
genTreeOps oper;
unsigned kind;
// printf("tree1:\n"); gtDispTree(op1);
// printf("tree2:\n"); gtDispTree(op2);
AGAIN:
if (op1 == nullptr)
{
return (op2 == nullptr);
}
if (op2 == nullptr)
{
return false;
}
if (op1 == op2)
{
return true;
}
oper = op1->OperGet();
/* The operators must be equal */
if (oper != op2->gtOper)
{
return false;
}
/* The types must be equal */
if (op1->gtType != op2->gtType)
{
return false;
}
/* Overflow must be equal */
if (op1->gtOverflowEx() != op2->gtOverflowEx())
{
return false;
}
/* Sensible flags must be equal */
if ((op1->gtFlags & (GTF_UNSIGNED)) != (op2->gtFlags & (GTF_UNSIGNED)))
{
return false;
}
/* Figure out what kind of nodes we're comparing */
kind = op1->OperKind();
/* Is this a constant node? */
if (op1->OperIsConst())
{
switch (oper)
{
case GT_CNS_INT:
if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
{
return true;
}
break;
case GT_CNS_STR:
if ((op1->AsStrCon()->gtSconCPX == op2->AsStrCon()->gtSconCPX) &&
(op1->AsStrCon()->gtScpHnd == op2->AsStrCon()->gtScpHnd))
{
return true;
}
break;
#if 0
// TODO-CQ: Enable this in the future
case GT_CNS_LNG:
if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
return true;
break;
case GT_CNS_DBL:
if (op1->AsDblCon()->gtDconVal == op2->AsDblCon()->gtDconVal)
return true;
break;
#endif
default:
break;
}
return false;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_LCL_VAR:
if (op1->AsLclVarCommon()->GetLclNum() != op2->AsLclVarCommon()->GetLclNum())
{
break;
}
return true;
case GT_LCL_FLD:
if ((op1->AsLclFld()->GetLclNum() != op2->AsLclFld()->GetLclNum()) ||
(op1->AsLclFld()->GetLclOffs() != op2->AsLclFld()->GetLclOffs()))
{
break;
}
return true;
case GT_CLS_VAR:
if (op1->AsClsVar()->gtClsVarHnd != op2->AsClsVar()->gtClsVarHnd)
{
break;
}
return true;
case GT_LABEL:
return true;
case GT_ARGPLACE:
if ((op1->gtType == TYP_STRUCT) &&
(op1->AsArgPlace()->gtArgPlaceClsHnd != op2->AsArgPlace()->gtArgPlaceClsHnd))
{
break;
}
return true;
default:
break;
}
return false;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_UNOP)
{
if (IsExOp(kind))
{
// ExOp operators extend unary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_ARR_LENGTH:
if (op1->AsArrLen()->ArrLenOffset() != op2->AsArrLen()->ArrLenOffset())
{
return false;
}
break;
case GT_CAST:
if (op1->AsCast()->gtCastType != op2->AsCast()->gtCastType)
{
return false;
}
break;
case GT_BLK:
case GT_OBJ:
if (op1->AsBlk()->GetLayout() != op2->AsBlk()->GetLayout())
{
return false;
}
break;
case GT_FIELD:
if (op1->AsField()->gtFldHnd != op2->AsField()->gtFldHnd)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_RUNTIMELOOKUP:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
return Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1);
}
if (kind & GTK_BINOP)
{
if (IsExOp(kind))
{
// ExOp operators extend binary operators with extra, non-GenTree* members. In many cases,
// these should be included in the comparison.
switch (oper)
{
case GT_INTRINSIC:
if (op1->AsIntrinsic()->gtIntrinsicName != op2->AsIntrinsic()->gtIntrinsicName)
{
return false;
}
break;
case GT_LEA:
if (op1->AsAddrMode()->gtScale != op2->AsAddrMode()->gtScale)
{
return false;
}
if (op1->AsAddrMode()->Offset() != op2->AsAddrMode()->Offset())
{
return false;
}
break;
case GT_BOUNDS_CHECK:
if (op1->AsBoundsChk()->gtThrowKind != op2->AsBoundsChk()->gtThrowKind)
{
return false;
}
break;
case GT_INDEX:
if (op1->AsIndex()->gtIndElemSize != op2->AsIndex()->gtIndElemSize)
{
return false;
}
break;
case GT_INDEX_ADDR:
if (op1->AsIndexAddr()->gtElemSize != op2->AsIndexAddr()->gtElemSize)
{
return false;
}
break;
// For the ones below no extra argument matters for comparison.
case GT_QMARK:
break;
default:
assert(!"unexpected binary ExOp operator");
}
}
if (op1->AsOp()->gtOp2)
{
if (!Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp1, swapOK))
{
if (swapOK && OperIsCommutative(oper) &&
((op1->AsOp()->gtOp1->gtFlags | op1->AsOp()->gtOp2->gtFlags | op2->AsOp()->gtOp1->gtFlags |
op2->AsOp()->gtOp2->gtFlags) &
GTF_ALL_EFFECT) == 0)
{
if (Compare(op1->AsOp()->gtOp1, op2->AsOp()->gtOp2, swapOK))
{
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp1;
goto AGAIN;
}
}
return false;
}
op1 = op1->AsOp()->gtOp2;
op2 = op2->AsOp()->gtOp2;
goto AGAIN;
}
else
{
op1 = op1->AsOp()->gtOp1;
op2 = op2->AsOp()->gtOp1;
if (!op1)
{
return (op2 == nullptr);
}
if (!op2)
{
return false;
}
goto AGAIN;
}
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
return GenTreeCall::Equals(op1->AsCall(), op2->AsCall());
#ifdef FEATURE_SIMD
case GT_SIMD:
return GenTreeSIMD::Equals(op1->AsSIMD(), op2->AsSIMD());
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
return GenTreeHWIntrinsic::Equals(op1->AsHWIntrinsic(), op2->AsHWIntrinsic());
#endif
case GT_ARR_ELEM:
if (op1->AsArrElem()->gtArrRank != op2->AsArrElem()->gtArrRank)
{
return false;
}
// NOTE: gtArrElemSize may need to be handled
unsigned dim;
for (dim = 0; dim < op1->AsArrElem()->gtArrRank; dim++)
{
if (!Compare(op1->AsArrElem()->gtArrInds[dim], op2->AsArrElem()->gtArrInds[dim]))
{
return false;
}
}
op1 = op1->AsArrElem()->gtArrObj;
op2 = op2->AsArrElem()->gtArrObj;
goto AGAIN;
case GT_ARR_OFFSET:
if (op1->AsArrOffs()->gtCurrDim != op2->AsArrOffs()->gtCurrDim ||
op1->AsArrOffs()->gtArrRank != op2->AsArrOffs()->gtArrRank)
{
return false;
}
return (Compare(op1->AsArrOffs()->gtOffset, op2->AsArrOffs()->gtOffset) &&
Compare(op1->AsArrOffs()->gtIndex, op2->AsArrOffs()->gtIndex) &&
Compare(op1->AsArrOffs()->gtArrObj, op2->AsArrOffs()->gtArrObj));
case GT_PHI:
return GenTreePhi::Equals(op1->AsPhi(), op2->AsPhi());
case GT_FIELD_LIST:
return GenTreeFieldList::Equals(op1->AsFieldList(), op2->AsFieldList());
case GT_CMPXCHG:
return Compare(op1->AsCmpXchg()->gtOpLocation, op2->AsCmpXchg()->gtOpLocation) &&
Compare(op1->AsCmpXchg()->gtOpValue, op2->AsCmpXchg()->gtOpValue) &&
Compare(op1->AsCmpXchg()->gtOpComparand, op2->AsCmpXchg()->gtOpComparand);
case GT_STORE_DYN_BLK:
return Compare(op1->AsStoreDynBlk()->Addr(), op2->AsStoreDynBlk()->Addr()) &&
Compare(op1->AsStoreDynBlk()->Data(), op2->AsStoreDynBlk()->Data()) &&
Compare(op1->AsStoreDynBlk()->gtDynamicSize, op2->AsStoreDynBlk()->gtDynamicSize);
default:
assert(!"unexpected operator");
}
return false;
}
//------------------------------------------------------------------------
// gtHasRef: Find out whether the given tree contains a local/field.
//
// Arguments:
// tree - tree to find the local in
// lclNum - the local's number, *or* the handle for the field
//
// Return Value:
// Whether "tree" has any LCL_VAR/LCL_FLD nodes that refer to the
// local, LHS or RHS, or FIELD nodes with the specified handle.
//
// Notes:
// Does not pay attention to local address nodes.
//
/* static */ bool Compiler::gtHasRef(GenTree* tree, ssize_t lclNum)
{
if (tree == nullptr)
{
return false;
}
if (tree->OperIsLeaf())
{
if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && (tree->AsLclVarCommon()->GetLclNum() == (unsigned)lclNum))
{
return true;
}
if (tree->OperIs(GT_RET_EXPR))
{
return gtHasRef(tree->AsRetExpr()->gtInlineCandidate, lclNum);
}
return false;
}
if (tree->OperIsUnary())
{
// Code in importation (see CEE_STFLD in impImportBlockCode), when
// spilling, can pass us a "lclNum" that is actually a field handle...
if (tree->OperIs(GT_FIELD) && (lclNum == (ssize_t)tree->AsField()->gtFldHnd))
{
return true;
}
return gtHasRef(tree->AsUnOp()->gtGetOp1(), lclNum);
}
if (tree->OperIsBinary())
{
return gtHasRef(tree->AsOp()->gtGetOp1(), lclNum) || gtHasRef(tree->AsOp()->gtGetOp2(), lclNum);
}
bool result = false;
tree->VisitOperands([lclNum, &result](GenTree* operand) -> GenTree::VisitResult {
if (gtHasRef(operand, lclNum))
{
result = true;
return GenTree::VisitResult::Abort;
}
return GenTree::VisitResult::Continue;
});
return result;
}
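// Illustrative note (hypothetical tree, not taken from a dump): for
//   ASG(LCL_VAR V02, ADD(LCL_VAR V03, CNS_INT 1))
// gtHasRef(tree, 3) returns true because V03 appears under the ADD, while
// gtHasRef(tree, 5) returns false since no V05 appears anywhere in the tree.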
struct AddrTakenDsc
{
Compiler* comp;
bool hasAddrTakenLcl;
};
/* static */
Compiler::fgWalkResult Compiler::gtHasLocalsWithAddrOpCB(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
Compiler* comp = data->compiler;
if (tree->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
((AddrTakenDsc*)data->pCallbackData)->hasAddrTakenLcl = true;
return WALK_ABORT;
}
}
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Return true if this tree contains locals with lvHasLdAddrOp or IsAddressExposed()
* flag(s) set.
*/
bool Compiler::gtHasLocalsWithAddrOp(GenTree* tree)
{
AddrTakenDsc desc;
desc.comp = this;
desc.hasAddrTakenLcl = false;
fgWalkTreePre(&tree, gtHasLocalsWithAddrOpCB, &desc);
return desc.hasAddrTakenLcl;
}
#ifdef DEBUG
/*****************************************************************************
*
* Helper used to compute hash values for trees.
*/
inline unsigned genTreeHashAdd(unsigned old, unsigned add)
{
return (old + old / 2) ^ add;
}
inline unsigned genTreeHashAdd(unsigned old, void* add)
{
return genTreeHashAdd(old, (unsigned)(size_t)add);
}
/*****************************************************************************
*
* Given an arbitrary expression tree, compute a hash value for it.
*/
unsigned Compiler::gtHashValue(GenTree* tree)
{
genTreeOps oper;
unsigned kind;
unsigned hash = 0;
GenTree* temp;
AGAIN:
assert(tree);
/* Figure out what kind of a node we have */
oper = tree->OperGet();
kind = tree->OperKind();
/* Include the operator value in the hash */
hash = genTreeHashAdd(hash, oper);
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
size_t add;
switch (oper)
{
UINT64 bits;
case GT_LCL_VAR:
add = tree->AsLclVar()->GetLclNum();
break;
case GT_LCL_FLD:
hash = genTreeHashAdd(hash, tree->AsLclFld()->GetLclNum());
add = tree->AsLclFld()->GetLclOffs();
break;
case GT_CNS_INT:
add = tree->AsIntCon()->gtIconVal;
break;
case GT_CNS_LNG:
bits = (UINT64)tree->AsLngCon()->gtLconVal;
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_DBL:
bits = *(UINT64*)(&tree->AsDblCon()->gtDconVal);
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
add = genTreeHashAdd(uhi32(bits), ulo32(bits));
#endif
break;
case GT_CNS_STR:
add = tree->AsStrCon()->gtSconCPX;
break;
case GT_JMP:
add = tree->AsVal()->gtVal1;
break;
default:
add = 0;
break;
}
// clang-format off
// narrow 'add' into a 32-bit 'val'
unsigned val;
#ifdef HOST_64BIT
val = genTreeHashAdd(uhi32(add), ulo32(add));
#else // 32-bit host
val = add;
#endif
// clang-format on
hash = genTreeHashAdd(hash, val);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
GenTree* op1;
if (kind & GTK_UNOP)
{
op1 = tree->AsOp()->gtOp1;
/* Special case: no sub-operand at all */
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_ARR_LENGTH:
hash += tree->AsArrLen()->ArrLenOffset();
break;
case GT_CAST:
hash ^= tree->AsCast()->gtCastType;
break;
case GT_INDEX:
hash += tree->AsIndex()->gtIndElemSize;
break;
case GT_INDEX_ADDR:
hash += tree->AsIndexAddr()->gtElemSize;
break;
case GT_ALLOCOBJ:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsAllocObj()->gtAllocObjClsHnd)));
hash = genTreeHashAdd(hash, tree->AsAllocObj()->gtNewHelper);
break;
case GT_RUNTIMELOOKUP:
hash = genTreeHashAdd(hash, static_cast<unsigned>(
reinterpret_cast<uintptr_t>(tree->AsRuntimeLookup()->gtHnd)));
break;
case GT_BLK:
case GT_OBJ:
hash =
genTreeHashAdd(hash,
static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsBlk()->GetLayout())));
break;
case GT_FIELD:
hash = genTreeHashAdd(hash, tree->AsField()->gtFldHnd);
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
break;
default:
assert(!"unexpected unary ExOp operator");
}
}
if (!op1)
{
goto DONE;
}
tree = op1;
goto AGAIN;
}
if (kind & GTK_BINOP)
{
if (GenTree::IsExOp(kind))
{
// ExOp operators extend operators with extra, non-GenTree* members. In many cases,
// these should be included in the hash code.
switch (oper)
{
case GT_INTRINSIC:
hash += tree->AsIntrinsic()->gtIntrinsicName;
break;
case GT_LEA:
hash += static_cast<unsigned>(tree->AsAddrMode()->Offset() << 3) + tree->AsAddrMode()->gtScale;
break;
case GT_BOUNDS_CHECK:
hash = genTreeHashAdd(hash, tree->AsBoundsChk()->gtThrowKind);
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
hash ^= PtrToUlong(tree->AsBlk()->GetLayout());
break;
// For the ones below no extra argument matters for comparison.
case GT_ARR_INDEX:
case GT_QMARK:
case GT_INDEX:
case GT_INDEX_ADDR:
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
hash += tree->AsSIMD()->GetSIMDIntrinsicId();
hash += tree->AsSIMD()->GetSimdBaseType();
hash += tree->AsSIMD()->GetSimdSize();
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
hash += tree->AsHWIntrinsic()->GetHWIntrinsicId();
hash += tree->AsHWIntrinsic()->GetSimdBaseType();
hash += tree->AsHWIntrinsic()->GetSimdSize();
hash += tree->AsHWIntrinsic()->GetAuxiliaryType();
hash += tree->AsHWIntrinsic()->GetOtherReg();
break;
#endif // FEATURE_HW_INTRINSICS
default:
assert(!"unexpected binary ExOp operator");
}
}
op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
/* Is there a second sub-operand? */
if (!op2)
{
/* Special case: no sub-operands at all */
if (!op1)
{
goto DONE;
}
/* This is a unary operator */
tree = op1;
goto AGAIN;
}
/* This is a binary operator */
unsigned hsh1 = gtHashValue(op1);
/* Add op1's hash to the running value and continue with op2 */
hash = genTreeHashAdd(hash, hsh1);
tree = op2;
goto AGAIN;
}
/* See what kind of a special operator we have here */
switch (tree->gtOper)
{
case GT_ARR_ELEM:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrObj));
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrElem()->gtArrInds[dim]));
}
break;
case GT_ARR_OFFSET:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtOffset));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtIndex));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsArrOffs()->gtArrObj));
break;
case GT_CALL:
if ((tree->AsCall()->gtCallThisArg != nullptr) && !tree->AsCall()->gtCallThisArg->GetNode()->OperIs(GT_NOP))
{
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCall()->gtCallThisArg->GetNode()));
}
for (GenTreeCall::Use& use : tree->AsCall()->Args())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
temp = tree->AsCall()->gtCallAddr;
assert(temp);
hash = genTreeHashAdd(hash, gtHashValue(temp));
}
else
{
hash = genTreeHashAdd(hash, tree->AsCall()->gtCallMethHnd);
}
for (GenTreeCall::Use& use : tree->AsCall()->LateArgs())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
// TODO-List: rewrite with a general visitor / iterator?
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
hash = genTreeHashAdd(hash, gtHashValue(operand));
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
hash = genTreeHashAdd(hash, gtHashValue(use.GetNode()));
}
break;
case GT_CMPXCHG:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpLocation));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpValue));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsCmpXchg()->gtOpComparand));
break;
case GT_STORE_DYN_BLK:
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Data()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->Addr()));
hash = genTreeHashAdd(hash, gtHashValue(tree->AsStoreDynBlk()->gtDynamicSize));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
assert(!"unexpected operator");
break;
}
DONE:
return hash;
}
#endif // DEBUG
/*****************************************************************************
*
* Return a relational operator that is the reverse of the given one.
*/
/* static */
genTreeOps GenTree::ReverseRelop(genTreeOps relop)
{
static const genTreeOps reverseOps[] = {
GT_NE, // GT_EQ
GT_EQ, // GT_NE
GT_GE, // GT_LT
GT_GT, // GT_LE
GT_LT, // GT_GE
GT_LE, // GT_GT
GT_TEST_NE, // GT_TEST_EQ
GT_TEST_EQ, // GT_TEST_NE
};
assert(reverseOps[GT_EQ - GT_EQ] == GT_NE);
assert(reverseOps[GT_NE - GT_EQ] == GT_EQ);
assert(reverseOps[GT_LT - GT_EQ] == GT_GE);
assert(reverseOps[GT_LE - GT_EQ] == GT_GT);
assert(reverseOps[GT_GE - GT_EQ] == GT_LT);
assert(reverseOps[GT_GT - GT_EQ] == GT_LE);
assert(reverseOps[GT_TEST_EQ - GT_EQ] == GT_TEST_NE);
assert(reverseOps[GT_TEST_NE - GT_EQ] == GT_TEST_EQ);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(reverseOps));
return reverseOps[relop - GT_EQ];
}
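// Illustrative note: per the table above, ReverseRelop(GT_LT) yields GT_GE, i.e. !(a < b)
// becomes (a >= b); gtReverseCond below relies on this mapping when inverting a test condition.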
/*****************************************************************************
*
* Return a relational operator that will work for swapped operands.
*/
/* static */
genTreeOps GenTree::SwapRelop(genTreeOps relop)
{
static const genTreeOps swapOps[] = {
GT_EQ, // GT_EQ
GT_NE, // GT_NE
GT_GT, // GT_LT
GT_GE, // GT_LE
GT_LE, // GT_GE
GT_LT, // GT_GT
GT_TEST_EQ, // GT_TEST_EQ
GT_TEST_NE, // GT_TEST_NE
};
assert(swapOps[GT_EQ - GT_EQ] == GT_EQ);
assert(swapOps[GT_NE - GT_EQ] == GT_NE);
assert(swapOps[GT_LT - GT_EQ] == GT_GT);
assert(swapOps[GT_LE - GT_EQ] == GT_GE);
assert(swapOps[GT_GE - GT_EQ] == GT_LE);
assert(swapOps[GT_GT - GT_EQ] == GT_LT);
assert(swapOps[GT_TEST_EQ - GT_EQ] == GT_TEST_EQ);
assert(swapOps[GT_TEST_NE - GT_EQ] == GT_TEST_NE);
assert(OperIsCompare(relop));
assert(relop >= GT_EQ && (unsigned)(relop - GT_EQ) < sizeof(swapOps));
return swapOps[relop - GT_EQ];
}
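// Illustrative note: per the table above, SwapRelop(GT_LT) yields GT_GT, since (a < b) is
// equivalent to (b > a) once the operands are exchanged; GT_EQ, GT_NE and the TEST forms
// map to themselves.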
/*****************************************************************************
*
* Reverse the meaning of the given test condition.
*/
GenTree* Compiler::gtReverseCond(GenTree* tree)
{
if (tree->OperIsCompare())
{
tree->SetOper(GenTree::ReverseRelop(tree->OperGet()));
// Flip the GTF_RELOP_NAN_UN bit
// a ord b === (a != NaN && b != NaN)
// a unord b === (a == NaN || b == NaN)
// => !(a ord b) === (a unord b)
if (varTypeIsFloating(tree->AsOp()->gtOp1->TypeGet()))
{
tree->gtFlags ^= GTF_RELOP_NAN_UN;
}
}
else if (tree->OperIs(GT_JCC, GT_SETCC))
{
GenTreeCC* cc = tree->AsCC();
cc->gtCondition = GenCondition::Reverse(cc->gtCondition);
}
else if (tree->OperIs(GT_JCMP))
{
// Flip the GTF_JCMP_EQ
//
// This causes switching
// cbz <=> cbnz
// tbz <=> tbnz
tree->gtFlags ^= GTF_JCMP_EQ;
}
else
{
tree = gtNewOperNode(GT_NOT, TYP_INT, tree);
}
return tree;
}
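// Illustrative note: reversing a floating-point GT_LT with GTF_RELOP_NAN_UN clear
// (an ordered "a < b", false when either operand is NaN) produces GT_GE with
// GTF_RELOP_NAN_UN set (an unordered "a >= b", true when either operand is NaN),
// which is the correct negation in the presence of NaNs.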
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
//------------------------------------------------------------------------------
// IsValidLongMul : Check for long multiplication with 32 bit operands.
//
// Recognizes the following tree: MUL(CAST(long <- int), CAST(long <- int) or CONST),
// where CONST must be an integer constant that fits in 32 bits. Will try to detect
// cases when the multiplication cannot overflow and return "true" for them.
//
// This function does not change the state of the tree and is usable in LIR.
//
// Return Value:
// Whether this GT_MUL tree is a valid long multiplication candidate.
//
bool GenTreeOp::IsValidLongMul()
{
assert(OperIs(GT_MUL));
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
if (!TypeIs(TYP_LONG))
{
return false;
}
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
if (!(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp())))
{
return false;
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
!(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
{
return false;
}
if (op1->gtOverflow() || op2->gtOverflowEx())
{
return false;
}
if (gtOverflow())
{
auto getMaxValue = [this](GenTree* op) -> int64_t {
if (op->OperIs(GT_CAST))
{
if (op->IsUnsigned())
{
switch (op->AsCast()->CastOp()->TypeGet())
{
case TYP_UBYTE:
return UINT8_MAX;
case TYP_USHORT:
return UINT16_MAX;
default:
return UINT32_MAX;
}
}
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
return op->AsIntConCommon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
int64_t maxOp2 = getMaxValue(op2);
if (CheckedOps::MulOverflows(maxOp1, maxOp2, IsUnsigned()))
{
return false;
}
}
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
return false;
}
return true;
}
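// Illustrative note, restating the shape recognized above:
//
//   MUL long
//     CAST long <- int
//     CAST long <- int   (or an integral constant that fits in 32 bits)
//
// with both operands extending the same way (a non-negative constant is acceptable either way).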
#if !defined(TARGET_64BIT) && defined(DEBUG)
//------------------------------------------------------------------------------
// DebugCheckLongMul : Checks that a GTF_MUL_64RSLT tree is a valid MUL_LONG.
//
// Notes:
// This function is defined for 32 bit targets only because we *must* maintain
// the MUL_LONG-compatible tree shape throughout the compilation from morph to
// decomposition, since we do not have (great) ability to create new calls in LIR.
//
// It is for this reason that we recognize MUL_LONGs early in morph, mark them with
// a flag and then pessimize various places (e. g. assertion propagation) to not look
// at them. In contrast, on ARM64 we recognize MUL_LONGs late, in lowering, and thus
// do not need this function.
//
void GenTreeOp::DebugCheckLongMul()
{
assert(OperIs(GT_MUL));
assert(Is64RsltMul());
assert(TypeIs(TYP_LONG));
assert(!gtOverflow());
GenTree* op1 = gtGetOp1();
GenTree* op2 = gtGetOp2();
assert(op1->TypeIs(TYP_LONG));
assert(op2->TypeIs(TYP_LONG));
// op1 has to be CAST(long <- int)
assert(op1->OperIs(GT_CAST) && genActualTypeIsInt(op1->AsCast()->CastOp()));
assert(!op1->gtOverflow());
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
bool op1ZeroExtends = op1->IsUnsigned();
bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
// Do unsigned mul iff both operands are zero-extending.
assert(op1->IsUnsigned() == IsUnsigned());
}
#endif // !defined(TARGET_64BIT) && defined(DEBUG)
#endif // !defined(TARGET_64BIT) || defined(TARGET_ARM64)
unsigned Compiler::gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz)
{
unsigned level = 0;
unsigned costEx = 0;
unsigned costSz = 0;
for (GenTreeCall::Use& use : args)
{
GenTree* argNode = use.GetNode();
unsigned argLevel = gtSetEvalOrder(argNode);
if (argLevel > level)
{
level = argLevel;
}
if (argNode->GetCostEx() != 0)
{
costEx += argNode->GetCostEx();
costEx += lateArgs ? 0 : IND_COST_EX;
}
if (argNode->GetCostSz() != 0)
{
costSz += argNode->GetCostSz();
#ifdef TARGET_XARCH
if (lateArgs) // push is smaller than mov to reg
#endif
{
costSz += 1;
}
}
}
*callCostEx += costEx;
*callCostSz += costSz;
return level;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// gtSetMultiOpOrder: Calculate the costs for a MultiOp.
//
// Currently this function just preserves the previous behavior.
// TODO-List-Cleanup: implement proper costing for these trees.
//
// Arguments:
// multiOp - The MultiOp tree in question
//
// Return Value:
// The Sethi "complexity" for this tree (the idealized number of
// registers needed to evaluate it).
//
unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
{
// These default costs preserve previous behavior.
// TODO-CQ: investigate opportunities for tuning them.
int costEx = 1;
int costSz = 1;
unsigned level = 0;
unsigned lvl2 = 0;
#if defined(FEATURE_HW_INTRINSICS)
if (multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
if ((hwTree->GetOperandCount() == 1) && hwTree->OperIsMemoryLoadOrStore())
{
costEx = IND_COST_EX;
costSz = 2;
GenTree* const addrNode = hwTree->Op(1);
level = gtSetEvalOrder(addrNode);
GenTree* const addr = addrNode->gtEffectiveVal();
// See if we can form a complex addressing mode.
if (addr->OperIs(GT_ADD) && gtMarkAddrMode(addr, &costEx, &costSz, hwTree->TypeGet()))
{
// Nothing to do, costs have been set.
}
else
{
costEx += addr->GetCostEx();
costSz += addr->GetCostSz();
}
hwTree->SetCosts(costEx, costSz);
return level;
}
#endif
switch (hwTree->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_Vector128_Create:
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
case NI_Vector128_Create:
#endif
{
if ((hwTree->GetOperandCount() == 1) && hwTree->Op(1)->OperIsConst())
{
// Vector.Create(cns) is cheap, but not cheap enough to be costed at (1,1)
costEx = IND_COST_EX;
costSz = 2;
level = gtSetEvalOrder(hwTree->Op(1));
hwTree->SetCosts(costEx, costSz);
return level;
}
break;
}
default:
break;
}
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// This code is here to preserve previous behavior.
switch (multiOp->GetOperandCount())
{
case 0:
// This is a constant HWIntrinsic, we already have correct costs.
break;
case 1:
// A "unary" case.
level = gtSetEvalOrder(multiOp->Op(1));
costEx += multiOp->Op(1)->GetCostEx();
costSz += multiOp->Op(1)->GetCostSz();
break;
case 2:
// A "binary" case.
// This way we have "level" be the complexity of the
// first tree to be evaluated, and "lvl2" - the second.
if (multiOp->IsReverseOp())
{
level = gtSetEvalOrder(multiOp->Op(2));
lvl2 = gtSetEvalOrder(multiOp->Op(1));
}
else
{
level = gtSetEvalOrder(multiOp->Op(1));
lvl2 = gtSetEvalOrder(multiOp->Op(2));
}
// We want the more complex tree to be evaluated first.
if (level < lvl2)
{
bool canSwap = multiOp->IsReverseOp() ? gtCanSwapOrder(multiOp->Op(2), multiOp->Op(1))
: gtCanSwapOrder(multiOp->Op(1), multiOp->Op(2));
if (canSwap)
{
if (multiOp->IsReverseOp())
{
multiOp->ClearReverseOp();
}
else
{
multiOp->SetReverseOp();
}
std::swap(level, lvl2);
}
}
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
break;
default:
// The former "ArgList" case... we'll be emulating it here.
// The old implementation pushed the nodes on the list, in pre-order.
// Then it popped and costed them in "reverse order", so that's what
// we'll be doing here as well.
unsigned nxtlvl = 0;
for (size_t i = multiOp->GetOperandCount(); i >= 1; i--)
{
GenTree* op = multiOp->Op(i);
unsigned lvl = gtSetEvalOrder(op);
if (lvl < 1)
{
level = nxtlvl;
}
else if (lvl == nxtlvl)
{
level = lvl + 1;
}
else
{
level = lvl;
}
costEx += op->GetCostEx();
costSz += op->GetCostSz();
// Preserving previous behavior...
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_XARCH
if (op->GetCostSz() != 0)
{
costSz += 1;
}
#endif
nxtlvl = level;
}
break;
}
multiOp->SetCosts(costEx, costSz);
return level;
}
#endif
//-----------------------------------------------------------------------------
// gtWalkOp: Traverse and mark an address expression
//
// Arguments:
// op1WB - An out parameter which is either the address expression, or one
// of its operands.
// op2WB - An out parameter which starts as either null or one of the operands
// of the address expression.
//    base - The base address of the addressing mode; only used when 'constOnly' is false (may be null)
// constOnly - True if we will only traverse into ADDs with constant op2.
//
// This routine is a helper routine for gtSetEvalOrder() and is used to identify the
// base and index nodes, which will be validated against those identified by
// genCreateAddrMode().
// It also marks the ADD nodes involved in the address expression with the
// GTF_ADDRMODE_NO_CSE flag which prevents them from being considered for CSE's.
//
// Its two output parameters are modified under the following conditions:
//
// It is called once with the original address expression as 'op1WB', and
// with 'constOnly' set to false. On this first invocation, *op1WB is always
// an ADD node, and it will consider the operands of the ADD even if its op2 is
// not a constant. However, when it encounters a non-constant or the base in the
// op2 position, it stops iterating. That operand is returned in the 'op2WB' out
// parameter, and will be considered on the third invocation of this method if
// it is an ADD.
//
// It is called the second time with the two operands of the original expression, in
// the original order, and the third time in reverse order. For these invocations
// 'constOnly' is true, so it will only traverse cascaded ADD nodes if they have a
// constant op2.
//
// The result, after three invocations, is that the values of the two out parameters
// correspond to the base and index in some fashion. This method doesn't attempt
// to determine or validate the scale or offset, if any.
//
// Assumptions (presumed to be ensured by genCreateAddrMode()):
// If an ADD has a constant operand, it is in the op2 position.
//
// Notes:
// This method, and its invocation sequence, are quite confusing, and since they
// were not originally well-documented, this specification is a possibly-imperfect
// reconstruction.
// The motivation for the handling of the NOP case is unclear.
// Note that 'op2WB' is only modified in the initial (!constOnly) case,
// or if a NOP is encountered in the op1 position.
//
void Compiler::gtWalkOp(GenTree** op1WB, GenTree** op2WB, GenTree* base, bool constOnly)
{
GenTree* op1 = *op1WB;
GenTree* op2 = *op2WB;
op1 = op1->gtEffectiveVal();
// Now we look for op1's with non-overflow GT_ADDs [of constants]
while ((op1->gtOper == GT_ADD) && (!op1->gtOverflow()) && (!constOnly || (op1->AsOp()->gtOp2->IsCnsIntOrI())))
{
// mark it with GTF_ADDRMODE_NO_CSE
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (!constOnly)
{
op2 = op1->AsOp()->gtOp2;
}
op1 = op1->AsOp()->gtOp1;
// If op1 is a GT_NOP then swap op1 and op2.
// (Why? Also, presumably op2 is not a GT_NOP in this case?)
if (op1->gtOper == GT_NOP)
{
GenTree* tmp;
tmp = op1;
op1 = op2;
op2 = tmp;
}
if (!constOnly && ((op2 == base) || (!op2->IsCnsIntOrI())))
{
break;
}
op1 = op1->gtEffectiveVal();
}
*op1WB = op1;
*op2WB = op2;
}
#ifdef DEBUG
/*****************************************************************************
* This is a workaround. It is to help implement an assert in gtSetEvalOrder() that the values
* gtWalkOp() leaves in op1 and op2 correspond with the values of adr, idx, mul, and cns
* that are returned by genCreateAddrMode(). It's essentially impossible to determine
* what gtWalkOp() *should* return for all possible trees. This simply loosens one assert
* to handle the following case:
indir int
const(h) int 4 field
+ byref
lclVar byref V00 this <-- op2
comma byref <-- adr (base)
indir byte
lclVar byref V00 this
+ byref
const int 2 <-- mul == 4
<< int <-- op1
lclVar int V01 arg1 <-- idx
* Here, we are planning to generate the address mode [edx+4*eax], where eax = idx and edx = the GT_COMMA expression.
* To check adr equivalence with op2, we need to walk down the GT_ADD tree just like gtWalkOp() does.
*/
GenTree* Compiler::gtWalkOpEffectiveVal(GenTree* op)
{
for (;;)
{
op = op->gtEffectiveVal();
if ((op->gtOper != GT_ADD) || op->gtOverflow() || !op->AsOp()->gtOp2->IsCnsIntOrI())
{
break;
}
op = op->AsOp()->gtOp1;
}
return op;
}
#endif // DEBUG
/*****************************************************************************
*
* Given a tree, set the GetCostEx and GetCostSz() fields which
* are used to measure the relative costs of the codegen of the tree
*
*/
void Compiler::gtPrepareCost(GenTree* tree)
{
gtSetEvalOrder(tree);
}
bool Compiler::gtIsLikelyRegVar(GenTree* tree)
{
if (tree->gtOper != GT_LCL_VAR)
{
return false;
}
const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varDsc->lvDoNotEnregister)
{
return false;
}
// If this is an EH-live var, return false if it is a def,
// as it will have to go to memory.
if (varDsc->lvLiveInOutOfHndlr && ((tree->gtFlags & GTF_VAR_DEF) != 0))
{
return false;
}
// Be pessimistic if ref counts are not yet set up.
//
// Perhaps we should be optimistic though.
// See notes in GitHub issue 18969.
if (!lvaLocalVarRefCounted())
{
return false;
}
if (varDsc->lvRefCntWtd() < (BB_UNITY_WEIGHT * 3))
{
return false;
}
#ifdef TARGET_X86
if (varTypeUsesFloatReg(tree->TypeGet()))
return false;
if (varTypeIsLong(tree->TypeGet()))
return false;
#endif
return true;
}
//------------------------------------------------------------------------
// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
//
// Arguments:
// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
// secondNode - The other operand of the tree.
//
// Return Value:
// Returns a boolean indicating whether it is safe to reverse the execution
// order of the two trees, considering any exception, global effects, or
// ordering constraints.
//
bool Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
{
// Relative of order of global / side effects can't be swapped.
bool canSwap = true;
if (optValnumCSE_phase)
{
canSwap = optCSE_canSwap(firstNode, secondNode);
}
// We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
if (canSwap && (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
{
canSwap = false;
}
// When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
// when one or both sides contain a GTF_CALL or GTF_EXCEPT.
// Currently only the C and C++ languages allow non-strict side effect order.
unsigned strictEffects = GTF_GLOB_EFFECT;
if (canSwap && (firstNode->gtFlags & strictEffects))
{
// op1 has side effects that can't be reordered.
// Check for some special cases where we still may be able to swap.
if (secondNode->gtFlags & strictEffects)
{
// op2 also has non-reorderable side effects - can't swap.
canSwap = false;
}
else
{
// No side effects in op2 - we can swap iff op1 has no way of modifying op2,
// i.e. through byref assignments or calls, or op2 is invariant (e.g. a constant).
if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
{
// We have to be conservative - can swap iff op2 is invariant.
if (!secondNode->IsInvariant())
{
canSwap = false;
}
}
}
}
return canSwap;
}
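// Illustrative note (hypothetical operands): if firstNode contains a call (a persistent
// side effect) and secondNode is a GT_LCL_VAR that the call might modify, the evaluation
// order cannot be reversed; if secondNode is invariant (e.g. a constant), the swap is allowed.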
//------------------------------------------------------------------------
// gtMarkAddrMode: Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
//
// Arguments:
//    addr    - The address expression
//    pCostEx - The execution cost of this address expression (in/out arg to be updated)
//    pCostSz - The size cost of this address expression (in/out arg to be updated)
// type - The type of the value being referenced by the parent of this address expression.
//
// Return Value:
// Returns true if it finds an addressing mode.
//
// Notes:
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
unsigned mul; // This is the index (scale) value for the addressing mode
ssize_t cns; // This is the constant offset
GenTree* base; // This is the base of the address.
GenTree* idx; // This is the index.
if (codeGen->genCreateAddrMode(addr, false /*fold*/, &rev, &base, &idx, &mul, &cns))
{
#ifdef TARGET_ARMARCH
// The multiplier should be a "natural-scale" power of two equal to the width of the accessed type.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - cannot be optimized
// *(int*)(data + index * 2); - cannot be optimized
//
if ((mul > 0) && (genTypeSize(type) != mul))
{
return false;
}
#endif
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
// all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
addrmodeCount++;
}
if (cns)
{
if (((signed char)cns) == ((int)cns))
{
*pCostSz += 1;
}
else
{
*pCostSz += 4;
}
addrmodeCount++;
}
if (mul)
{
addrmodeCount++;
}
// When we form a complex addressing mode we can reduce the costs
// associated with the interior GT_ADD and GT_LSH nodes:
//
// GT_ADD -- reduce this interior GT_ADD by (-3,-3)
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
// 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
// / \ --
// 'idx' 'mul'
//
if (addrmodeCount > 1)
{
// The number of interior GT_ADD and GT_LSH nodes will always be one less than addrmodeCount
//
addrmodeCount--;
GenTree* tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining
// addrmodeCount
tmp->SetCosts(tmp->GetCostEx() - addrmodeCount, tmp->GetCostSz() - addrmodeCount);
addrmodeCount--;
if (addrmodeCount > 0)
{
GenTree* tmpOp1 = tmp->AsOp()->gtOp1;
GenTree* tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_LSH)
{
tmp = tmpOp2;
}
else if (tmpOp1->OperGet() == GT_LSH)
{
tmp = tmpOp1;
}
else if (tmpOp2->OperGet() == GT_ADD)
{
tmp = tmpOp2;
}
else
{
// We can very rarely encounter a tree that has a GT_COMMA node
// that is difficult to walk, so we just early out without decrementing.
addrmodeCount = 0;
}
}
}
}
#elif defined TARGET_ARM
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
if ((base->gtOper == GT_LCL_VAR) && ((idx == NULL) || (cns == 0)))
{
*pCostSz -= 1;
}
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
if (mul > 0)
{
*pCostSz += 2;
}
}
if (cns)
{
if (cns >= 128) // small offsets fit into a 16-bit instruction
{
if (cns < 4096) // medium offsets require a 32-bit instruction
{
if (!varTypeIsFloating(type))
{
*pCostSz += 2;
}
}
else
{
*pCostEx += 2; // Very large offsets require movw/movt instructions
*pCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
*pCostEx += base->GetCostEx();
*pCostSz += base->GetCostSz();
}
if (idx)
{
*pCostEx += idx->GetCostEx();
*pCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (cns >= (4096 * genTypeSize(type)))
{
*pCostEx += 1;
*pCostSz += 4;
}
}
#else
#error "Unknown TARGET"
#endif
assert(addr->gtOper == GT_ADD);
assert(!addr->gtOverflow());
assert(mul != 1);
// If we have an addressing mode, we have one of:
// [base + cns]
// [ idx * mul ] // mul >= 2, else we would use base instead of idx
// [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
// [base + idx * mul ] // mul can be 0, 2, 4, or 8
// [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
CLANG_FORMAT_COMMENT_ANCHOR;
assert((base != nullptr) || (idx != nullptr && mul >= 2));
INDEBUG(GenTree* op1Save = addr);
// Walk 'addr' identifying non-overflow ADDs that will be part of the address mode.
// Note that we will be modifying 'op1' and 'op2' so that eventually they should
// map to the base and index.
GenTree* op1 = addr;
GenTree* op2 = nullptr;
gtWalkOp(&op1, &op2, base, false);
// op1 and op2 are now descendants of the root GT_ADD of the addressing mode.
assert(op1 != op1Save);
assert(op2 != nullptr);
#if defined(TARGET_XARCH)
// Walk the operands again (the third operand is unused in this case).
// This time we will only consider adds with constant op2's, since
// we have already found either a non-ADD op1 or a non-constant op2.
// NOTE: we don't support ADD(op1, cns) addressing for ARM/ARM64 yet so
// this walk makes no sense there.
gtWalkOp(&op1, &op2, nullptr, true);
// For XARCH we will fold GT_ADDs in the op2 position into the addressing mode, so we call
// gtWalkOp on both operands of the original GT_ADD.
// This is not done for ARMARCH. Though the stated reason is that we don't try to create a
// scaled index, in fact we actually do create them (even base + index*scale + offset).
// At this point, 'op2' may itself be an ADD of a constant that should be folded
// into the addressing mode.
// Walk op2 looking for non-overflow GT_ADDs of constants.
gtWalkOp(&op2, &op1, nullptr, true);
#endif // defined(TARGET_XARCH)
// OK we are done walking the tree
// Now assert that op1 and op2 correspond with base and idx
// in one of the several acceptable ways.
// Note that sometimes op1/op2 is equal to idx/base
// and other times op1/op2 is a GT_COMMA node with
// an effective value that is idx/base
if (mul > 1)
{
if ((op1 != base) && (op1->gtOper == GT_LSH))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1->AsOp()->gtOp1->gtOper == GT_MUL)
{
op1->AsOp()->gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
assert((base == nullptr) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
(gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
assert(op2 != nullptr);
assert(op2->OperIs(GT_LSH, GT_MUL));
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
// We may have eliminated multiple shifts and multiplies in the addressing mode,
// so navigate down through them to get to "idx".
GenTree* op2op1 = op2->AsOp()->gtOp1;
while ((op2op1->gtOper == GT_LSH || op2op1->gtOper == GT_MUL) && op2op1 != idx)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->AsOp()->gtOp1;
}
assert(op1->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
else
{
assert(mul == 0);
if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
{
if (idx != nullptr)
{
if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
{
GenTree* op1op1 = op1->AsOp()->gtOp1;
if ((op1op1->gtOper == GT_NOP) ||
(op1op1->gtOper == GT_MUL && op1op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op1op1->gtOper == GT_MUL)
{
op1op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
}
assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
else if ((op1 == base) || (op1->gtEffectiveVal() == base))
{
if (idx != nullptr)
{
assert(op2 != nullptr);
if (op2->OperIs(GT_MUL, GT_LSH))
{
GenTree* op2op1 = op2->AsOp()->gtOp1;
if ((op2op1->gtOper == GT_NOP) ||
(op2op1->gtOper == GT_MUL && op2op1->AsOp()->gtOp1->gtOper == GT_NOP))
{
op2->gtFlags |= GTF_ADDRMODE_NO_CSE;
if (op2op1->gtOper == GT_MUL)
{
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
assert((op2 == idx) || (op2->gtEffectiveVal() == idx));
}
}
else
{
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
return true;
} // end if (genCreateAddrMode(...))
return false;
}
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
* evaluated. If the second operand of a binary operator is more expensive
* than the first operand, then try to swap the operand trees. Updates the
* GTF_REVERSE_OPS bit if necessary in this case.
*
* Returns the Sethi 'complexity' estimate for this tree (the higher
* the number, the higher is the tree's resources requirement).
*
* This function sets:
* 1. GetCostEx() to the execution complexity estimate
* 2. GetCostSz() to the code size estimate
* 3. Sometimes sets GTF_ADDRMODE_NO_CSE on nodes in the tree.
* 4. DEBUG-only: clears GTF_DEBUG_NODE_MORPHED.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
/* Is this a FP value? */
bool isflt = varTypeIsFloating(tree->TypeGet());
/* Figure out what kind of a node we have */
const genTreeOps oper = tree->OperGet();
const unsigned kind = tree->OperKind();
/* Assume no fixed registers will be trashed */
unsigned level;
int costEx;
int costSz;
#ifdef DEBUG
costEx = -1;
costSz = -1;
#endif
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
#ifdef TARGET_ARM
case GT_CNS_STR:
// Uses movw/movt
costSz = 8;
costEx = 2;
goto COMMON_CNS;
case GT_CNS_LNG:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
INT64 lngVal = con->LngValue();
INT32 loVal = (INT32)(lngVal & 0xffffffff);
INT32 hiVal = (INT32)(lngVal >> 32);
if (lngVal == 0)
{
costSz = 1;
costEx = 1;
}
else
{
// Minimum of one instruction to setup hiVal,
// and one instruction to setup loVal
costSz = 4 + 4;
costEx = 1 + 1;
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
!codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
{
// Needs extra instruction: movw/movt
costSz += 4;
costEx += 1;
}
}
goto COMMON_CNS;
}
case GT_CNS_INT:
{
// If the constant is a handle then it will need to have a relocation
// applied to it.
// Any constant that requires a reloc must use the movw/movt sequence
//
GenTreeIntConCommon* con = tree->AsIntConCommon();
target_ssize_t conVal = (target_ssize_t)con->IconValue();
if (con->ImmedValNeedsReloc(this))
{
// Requires movw/movt
costSz = 8;
costEx = 2;
}
else if (codeGen->validImmForInstr(INS_add, conVal))
{
// Typically included with parent oper
costSz = 2;
costEx = 1;
}
else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
{
// Uses mov or mvn
costSz = 4;
costEx = 1;
}
else
{
// Needs movw/movt
costSz = 8;
costEx = 2;
}
goto COMMON_CNS;
}
#elif defined TARGET_XARCH
case GT_CNS_STR:
#ifdef TARGET_AMD64
costSz = 10;
costEx = 2;
#else // TARGET_X86
costSz = 4;
costEx = 1;
#endif
goto COMMON_CNS;
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
bool fitsInVal = true;
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
INT64 lngVal = con->LngValue();
conVal = (ssize_t)lngVal; // truncate to 32-bits
fitsInVal = ((INT64)conVal == lngVal);
}
#endif // TARGET_X86
// If the constant is a handle then it will need to have a relocation
// applied to it.
//
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
if (iconNeedsReloc)
{
costSz = 4;
costEx = 1;
}
else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
else if (!GenTreeIntConCommon::FitsInI32(conVal))
{
costSz = 10;
costEx = 2;
}
#endif // TARGET_AMD64
else
{
costSz = 4;
costEx = 1;
}
#ifdef TARGET_X86
if (oper == GT_CNS_LNG)
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
}
#endif // TARGET_X86
goto COMMON_CNS;
}
#elif defined(TARGET_ARM64)
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
INT64 imm = con->LngValue();
emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
costSz = 8;
costEx = 2;
}
else if (emitter::emitIns_valid_imm_for_add(imm, size))
{
costSz = 2;
costEx = 1;
}
else if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
costSz = 4;
costEx = 1;
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword.
// There are three forms:
//    movk - loads into any halfword, preserving the remaining halfwords
//    movz - loads into any halfword, zeroing the remaining halfwords
//    movn - loads into any halfword, zeroing the remaining halfwords and then
//           bitwise-inverting the whole register
// In some cases it is preferable to use movn, because it has the side effect of
// filling the other halfwords with ones.
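// Illustrative note (assumed value): imm == 0x0000123400005678 has two zero halfwords,
// so when starting with movz only the two non-zero halfwords need instructions (a movz
// plus one movk), giving an instruction count estimate of 2.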
// Determine whether movn or movz will require the fewest instructions to populate the immediate
bool preferMovz = false;
bool preferMovn = false;
int instructionCount = 4;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (!preferMovn && (uint16_t(imm >> i) == 0x0000))
{
preferMovz = true; // by using a movz to start we can save one instruction
instructionCount--;
}
else if (!preferMovz && (uint16_t(imm >> i) == 0xffff))
{
preferMovn = true; // by using a movn to start we can save one instruction
instructionCount--;
}
}
costEx = instructionCount;
costSz = 4 * instructionCount;
}
}
goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
/*
Note that some code below depends on constants always getting
moved to be the second operand of a binary operator. This is
easily accomplished by giving constants a level of 0, which
we do on the next line. If you ever decide to change this, be
aware that unless you make other arrangements for integer
constants to be moved, stuff will break.
*/
level = 0;
break;
case GT_CNS_DBL:
{
level = 0;
#if defined(TARGET_XARCH)
/* We use fldz and fld1 to load 0.0 and 1.0, but all other */
/* floating point constants are loaded using an indirection */
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
(*((__int64*)&(tree->AsDblCon()->gtDconVal)) == I64(0x3ff0000000000000)))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#elif defined(TARGET_ARM)
var_types targetType = tree->TypeGet();
if (targetType == TYP_FLOAT)
{
costEx = 1 + 2;
costSz = 2 + 4;
}
else
{
assert(targetType == TYP_DOUBLE);
costEx = 1 + 4;
costSz = 2 + 8;
}
#elif defined(TARGET_ARM64)
if ((*((__int64*)&(tree->AsDblCon()->gtDconVal)) == 0) ||
emitter::emitIns_valid_imm_for_fmov(tree->AsDblCon()->gtDconVal))
{
costEx = 1;
costSz = 1;
}
else
{
costEx = IND_COST_EX;
costSz = 4;
}
#else
#error "Unknown TARGET"
#endif
}
break;
case GT_LCL_VAR:
level = 1;
if (gtIsLikelyRegVar(tree))
{
costEx = 1;
costSz = 1;
/* Sign-extend and zero-extend are more expensive to load */
if (lvaTable[tree->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
costEx += 1;
costSz += 1;
}
}
else
{
costEx = IND_COST_EX;
costSz = 2;
/* Sign-extend and zero-extend are more expensive to load */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
}
#if defined(TARGET_AMD64)
// increase costSz for floating point locals
if (isflt)
{
costSz += 1;
if (!gtIsLikelyRegVar(tree))
{
costSz += 1;
}
}
#endif
break;
case GT_CLS_VAR:
#ifdef TARGET_ARM
// We generate movw/movt/ldr
level = 1;
costEx = 3 + IND_COST_EX; // 6
costSz = 4 + 4 + 2; // 10
break;
#endif
case GT_LCL_FLD:
level = 1;
costEx = IND_COST_EX;
costSz = 4;
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
level = 1;
costEx = 3;
costSz = 3;
break;
case GT_PHI_ARG:
case GT_ARGPLACE:
level = 0;
costEx = 0;
costSz = 0;
break;
default:
level = 1;
costEx = 1;
costSz = 1;
break;
}
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
int lvlb; // preference for op2
unsigned lvl2; // scratch variable
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
costEx = 0;
costSz = 0;
if (tree->OperIsAddrMode())
{
if (op1 == nullptr)
{
op1 = op2;
op2 = nullptr;
}
}
/* Check for a nilary operator */
if (op1 == nullptr)
{
assert(op2 == nullptr);
level = 0;
goto DONE;
}
/* Is this a unary operator? */
if (op2 == nullptr)
{
/* Process the operand of the operator */
/* Most Unary ops have costEx of 1 */
costEx = 1;
costSz = 1;
level = gtSetEvalOrder(op1);
GenTreeIntrinsic* intrinsic;
/* Special handling for some operators */
switch (oper)
{
case GT_JTRUE:
costEx = 2;
costSz = 2;
break;
case GT_SWITCH:
costEx = 10;
costSz = 5;
break;
case GT_CAST:
#if defined(TARGET_ARM)
costEx = 1;
costSz = 1;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 3;
costSz = 4;
}
#elif defined(TARGET_ARM64)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
costEx = 2;
costSz = 4;
}
#elif defined(TARGET_XARCH)
costEx = 1;
costSz = 2;
if (isflt || varTypeIsFloating(op1->TypeGet()))
{
/* cast involving floats always go through memory */
costEx = IND_COST_EX * 2;
costSz = 6;
}
#else
#error "Unknown TARGET"
#endif
/* Overflow casts are a lot more expensive */
if (tree->gtOverflow())
{
costEx += 6;
costSz += 6;
}
break;
case GT_NOP:
costEx = 0;
costSz = 0;
break;
case GT_INTRINSIC:
intrinsic = tree->AsIntrinsic();
// named intrinsic
assert(intrinsic->gtIntrinsicName != NI_Illegal);
// GT_INTRINSIC intrinsics Sin, Cos, Sqrt, Abs ... have higher costs.
// TODO: tune these costs target specific as some of these are
// target intrinsics and would cost less to generate code.
switch (intrinsic->gtIntrinsicName)
{
default:
assert(!"missing case for gtIntrinsicName");
costEx = 12;
costSz = 12;
break;
case NI_System_Math_Abs:
costEx = 5;
costSz = 15;
break;
case NI_System_Math_Acos:
case NI_System_Math_Acosh:
case NI_System_Math_Asin:
case NI_System_Math_Asinh:
case NI_System_Math_Atan:
case NI_System_Math_Atanh:
case NI_System_Math_Atan2:
case NI_System_Math_Cbrt:
case NI_System_Math_Ceiling:
case NI_System_Math_Cos:
case NI_System_Math_Cosh:
case NI_System_Math_Exp:
case NI_System_Math_Floor:
case NI_System_Math_FMod:
case NI_System_Math_FusedMultiplyAdd:
case NI_System_Math_ILogB:
case NI_System_Math_Log:
case NI_System_Math_Log2:
case NI_System_Math_Log10:
case NI_System_Math_Max:
case NI_System_Math_Min:
case NI_System_Math_Pow:
case NI_System_Math_Round:
case NI_System_Math_Sin:
case NI_System_Math_Sinh:
case NI_System_Math_Sqrt:
case NI_System_Math_Tan:
case NI_System_Math_Tanh:
case NI_System_Math_Truncate:
{
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls since we never CSE user calls. We don't do this for target intrinsics,
// however, as they typically represent single-instruction calls.
if (IsIntrinsicImplementedByUserCall(intrinsic->gtIntrinsicName))
{
costEx = 36;
costSz = 4;
}
else
{
costEx = 3;
costSz = 4;
}
break;
}
case NI_System_Object_GetType:
// We give intrinsics a large fixed execution cost because we'd like to CSE
// them, even if they are implemented by calls. This is different from modeling
// user calls since we never CSE user calls.
costEx = 36;
costSz = 4;
break;
}
level++;
break;
case GT_NOT:
case GT_NEG:
// We need to ensure that -x is evaluated before x or else
// we get burned while adjusting genFPstkLevel in x*-x where
// the rhs x is the last use of the enregistered x.
//
// Even in the integer case we want to prefer to
// evaluate the side without the GT_NEG node, all other things
// being equal. Also a GT_NOT requires a scratch register
level++;
break;
case GT_ADDR:
costEx = 0;
costSz = 1;
// If we have a GT_ADDR of an GT_IND we can just copy the costs from indOp1
if (op1->OperGet() == GT_IND)
{
GenTree* indOp1 = op1->AsOp()->gtOp1;
costEx = indOp1->GetCostEx();
costSz = indOp1->GetCostSz();
}
break;
case GT_ARR_LENGTH:
level++;
/* Array length should cost the same as an indirection, which has a costEx of IND_COST_EX */
costEx = IND_COST_EX - 1;
costSz = 2;
break;
case GT_MKREFANY:
case GT_OBJ:
// We estimate the cost of a GT_OBJ or GT_MKREFANY to be two loads (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BOX:
// We estimate the cost of a GT_BOX to be two stores (GT_INDs)
costEx = 2 * IND_COST_EX;
costSz = 2 * 2;
break;
case GT_BLK:
case GT_IND:
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
if (level == 0)
{
level = 1;
}
/* Indirections have a costEx of IND_COST_EX */
costEx = IND_COST_EX;
costSz = 2;
/* If we have to sign-extend or zero-extend, bump the cost */
if (varTypeIsSmall(tree->TypeGet()))
{
costEx += 1;
costSz += 1;
}
if (isflt)
{
if (tree->TypeGet() == TYP_DOUBLE)
{
costEx += 1;
}
#ifdef TARGET_ARM
costSz += 2;
#endif // TARGET_ARM
}
// Can we form an addressing mode with this indirection?
// TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
// addressing modes hidden under a comma node.
if (op1->gtOper == GT_ADD)
{
// See if we can form a complex addressing mode.
GenTree* addr = op1->gtEffectiveVal();
bool doAddrMode = true;
// Always use an addrMode for an array index indirection.
// TODO-1stClassStructs: Always do this, but first make sure it's
// done in Lowering as well.
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
else if (varTypeIsStruct(tree))
{
// This is a heuristic attempting to match prior behavior when indirections
// under a struct assignment would not be considered for addressing modes.
if (compCurStmt != nullptr)
{
GenTree* expr = compCurStmt->GetRootNode();
if ((expr->OperGet() == GT_ASG) &&
((expr->gtGetOp1() == tree) || (expr->gtGetOp2() == tree)))
{
doAddrMode = false;
}
}
}
}
#ifdef TARGET_ARM64
if (tree->gtFlags & GTF_IND_VOLATILE)
{
// For volatile store/loads when address is contained we always emit `dmb`
// if it's not - we emit one-way barriers i.e. ldar/stlr
doAddrMode = false;
}
#endif // TARGET_ARM64
if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
} // end if (op1->gtOper == GT_ADD)
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregistered LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#ifdef TARGET_XARCH
else if (op1->IsCnsIntOrI())
{
// Indirection of a CNS_INT, subtract 1 from costEx
// makes costEx 3 for x86 and 4 for amd64
//
costEx += (op1->GetCostEx() - 1);
costSz += op1->GetCostSz();
goto DONE;
}
#endif
break;
default:
break;
}
costEx += op1->GetCostEx();
costSz += op1->GetCostSz();
goto DONE;
}
/* Binary operator - check for certain special cases */
lvlb = 0;
/* Default Binary ops have a cost of 1,1 */
costEx = 1;
costSz = 1;
#ifdef TARGET_ARM
if (isflt)
{
costSz += 2;
}
#endif
#ifndef TARGET_64BIT
if (varTypeIsLong(op1->TypeGet()))
{
/* Operations on longs are more expensive */
costEx += 3;
costSz += 3;
}
#endif
switch (oper)
{
case GT_MOD:
case GT_UMOD:
/* Modulo by a power of 2 is easy */
if (op2->IsCnsIntOrI())
{
size_t ival = op2->AsIntConCommon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
break;
}
}
FALLTHROUGH;
case GT_DIV:
case GT_UDIV:
if (isflt)
{
/* fp division is very expensive to execute */
costEx = 36; // TYP_DOUBLE
costSz += 3;
}
else
{
/* integer division is also very expensive */
costEx = 20;
costSz += 2;
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 3;
}
break;
case GT_MUL:
if (isflt)
{
/* FP multiplication instructions are more expensive */
costEx += 4;
costSz += 3;
}
else
{
/* Integer multiplication instructions are more expensive */
costEx += 3;
costSz += 2;
if (tree->gtOverflow())
{
/* Overflow checks are more expensive */
costEx += 3;
costSz += 3;
}
#ifdef TARGET_X86
if ((tree->gtType == TYP_LONG) || tree->gtOverflow())
{
/* We use imulEAX for TYP_LONG and overflow multiplications */
// Encourage the first operand to be evaluated (into EAX/EDX) first
lvlb -= 4;
/* The 64-bit imul instruction costs more */
costEx += 4;
}
#endif // TARGET_X86
}
break;
case GT_ADD:
case GT_SUB:
if (isflt)
{
/* FP instructions are a bit more expensive */
costEx += 4;
costSz += 3;
break;
}
/* Overflow checks are more expensive */
if (tree->gtOverflow())
{
costEx += 3;
costSz += 3;
}
break;
case GT_BOUNDS_CHECK:
costEx = 4; // cmp reg,reg and jae throw (not taken)
costSz = 7; // jump to cold section
break;
case GT_COMMA:
/* Comma tosses the result of the left operand */
gtSetEvalOrder(op1);
level = gtSetEvalOrder(op2);
/* GT_COMMA cost is the sum of op1 and op2 costs */
costEx = (op1->GetCostEx() + op2->GetCostEx());
costSz = (op1->GetCostSz() + op2->GetCostSz());
goto DONE;
case GT_COLON:
level = gtSetEvalOrder(op1);
lvl2 = gtSetEvalOrder(op2);
if (level < lvl2)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
costEx = op1->GetCostEx() + op2->GetCostEx();
costSz = op1->GetCostSz() + op2->GetCostSz();
goto DONE;
case GT_INDEX_ADDR:
costEx = 6; // cmp reg,reg; jae throw; mov reg, [addrmode] (not taken)
costSz = 9; // jump to cold section
break;
case GT_ASG:
/* Assignments need a bit of special handling */
/* Process the target */
level = gtSetEvalOrder(op1);
if (gtIsLikelyRegVar(op1))
{
assert(lvlb == 0);
lvl2 = gtSetEvalOrder(op2);
/* Assignment to an enregistered LCL_VAR */
costEx = op2->GetCostEx();
costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment
goto DONE_OP1_AFTER_COST;
}
goto DONE_OP1;
default:
break;
}
/* Process the sub-operands */
level = gtSetEvalOrder(op1);
if (lvlb < 0)
{
level -= lvlb; // lvlb is negative, so this increases level
lvlb = 0;
}
DONE_OP1:
assert(lvlb >= 0);
lvl2 = gtSetEvalOrder(op2) + lvlb;
costEx += (op1->GetCostEx() + op2->GetCostEx());
costSz += (op1->GetCostSz() + op2->GetCostSz());
DONE_OP1_AFTER_COST:
bool bReverseInAssignment = false;
if (oper == GT_ASG && (!optValnumCSE_phase || optCSE_canSwap(op1, op2)))
{
GenTree* op1Val = op1;
// Skip over the GT_IND/GT_ADDR tree (if one exists)
//
if ((op1->gtOper == GT_IND) && (op1->AsOp()->gtOp1->gtOper == GT_ADDR))
{
op1Val = op1->AsOp()->gtOp1->AsOp()->gtOp1;
}
switch (op1Val->gtOper)
{
case GT_IND:
case GT_BLK:
case GT_OBJ:
{
// In an ASG(IND(addr), ...), the "IND" is a pure syntactical element,
// the actual indirection will only be realized at the point of the ASG
// itself. As such, we can discard any side effects "induced" by it in
// this logic.
//
// Note that for local "addr"s, liveness depends on seeing the defs and
// uses in correct order, and so we MUST reverse the ASG in that case.
//
GenTree* op1Addr = op1->AsIndir()->Addr();
if (op1Addr->IsLocalAddrExpr() || op1Addr->IsInvariant())
{
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
}
if (op1Addr->gtFlags & GTF_ALL_EFFECT)
{
break;
}
// In case op2 assigns to a local var that is used in op1Val, we have to evaluate op1Val first.
if (op2->gtFlags & GTF_ASG)
{
break;
}
// If op2 is simple then evaluate op1 first
if (op2->OperKind() & GTK_LEAF)
{
break;
}
}
// fall through and set GTF_REVERSE_OPS
FALLTHROUGH;
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_CLS_VAR:
// We evaluate op2 before op1
bReverseInAssignment = true;
tree->gtFlags |= GTF_REVERSE_OPS;
break;
default:
break;
}
}
else if (GenTree::OperIsCompare(oper))
{
/* Float compares remove both operands from the FP stack */
/* Also FP comparison uses EAX for flags */
if (varTypeIsFloating(op1->TypeGet()))
{
level++;
lvl2++;
}
if ((tree->gtFlags & GTF_RELOP_JMP_USED) == 0)
{
/* Using a setcc instruction is more expensive */
costEx += 3;
}
}
/* Check for other interesting cases */
switch (oper)
{
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
/* Variable sized shifts are more expensive and use REG_SHIFT */
if (!op2->IsCnsIntOrI())
{
costEx += 3;
#ifndef TARGET_64BIT
// Variable sized LONG shifts require the use of a helper call
//
if (tree->gtType == TYP_LONG)
{
level += 5;
lvl2 += 5;
costEx += 3 * IND_COST_EX;
costSz += 4;
}
#endif // !TARGET_64BIT
}
break;
case GT_INTRINSIC:
switch (tree->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Atan2:
case NI_System_Math_Pow:
// These math intrinsics are actually implemented by user calls.
// Increase the Sethi 'complexity' by two to reflect the argument
// register requirement.
level += 2;
break;
case NI_System_Math_Max:
case NI_System_Math_Min:
level++;
break;
default:
assert(!"Unknown binary GT_INTRINSIC operator");
break;
}
break;
default:
break;
}
/* We need to evaluate constants later as many places in codegen
can't handle op1 being a constant. This is normally naturally
enforced as constants have the lowest level of 0. However,
sometimes we end up with a tree like "cns1 < nop(cns2)". In
such cases, both sides have a level of 0. So encourage constants
to be evaluated last in such cases */
if ((level == 0) && (level == lvl2) && op1->OperIsConst() &&
(tree->OperIsCommutative() || tree->OperIsCompare()))
{
lvl2++;
}
/* We try to swap operands if the second one is more expensive */
bool tryToSwap;
GenTree* opA;
GenTree* opB;
if (tree->gtFlags & GTF_REVERSE_OPS)
{
opA = op2;
opB = op1;
}
else
{
opA = op1;
opB = op2;
}
if (fgOrder == FGOrderLinear)
{
// Don't swap anything if we're in linear order; we're really just interested in the costs.
tryToSwap = false;
}
else if (bReverseInAssignment)
{
// Assignments are special; we want the GTF_REVERSE_OPS flag,
// so if possible it was set above.
tryToSwap = false;
}
else if ((oper == GT_INTRINSIC) && IsIntrinsicImplementedByUserCall(tree->AsIntrinsic()->gtIntrinsicName))
{
// We do not swap operand execution order for intrinsics that are implemented by user calls
// because of trickiness around ensuring the execution order does not change during rationalization.
tryToSwap = false;
}
else if (oper == GT_BOUNDS_CHECK)
{
// Bounds check nodes used to not be binary, thus GTF_REVERSE_OPS was
// not enabled for them. This condition preserves that behavior.
// Additionally, CQ analysis shows that enabling GTF_REVERSE_OPS
// for these nodes leads to mixed results at best.
tryToSwap = false;
}
else
{
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tryToSwap = (level > lvl2);
}
else
{
tryToSwap = (level < lvl2);
}
// Try to force extra swapping when in the stress mode:
if (compStressCompile(STRESS_REVERSE_FLAG, 60) && ((tree->gtFlags & GTF_REVERSE_OPS) == 0) &&
!op2->OperIsConst())
{
tryToSwap = true;
}
}
if (tryToSwap)
{
bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
/* Can we swap the order by commuting the operands? */
switch (oper)
{
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (GenTree::SwapRelop(oper) != oper)
{
tree->SetOper(GenTree::SwapRelop(oper), GenTree::PRESERVE_VN);
}
FALLTHROUGH;
case GT_ADD:
case GT_MUL:
case GT_OR:
case GT_XOR:
case GT_AND:
/* Swap the operands */
tree->AsOp()->gtOp1 = op2;
tree->AsOp()->gtOp2 = op1;
break;
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
break;
default:
/* Mark the operand's evaluation order to be swapped */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
else
{
tree->gtFlags |= GTF_REVERSE_OPS;
}
break;
}
}
}
/* Swap the level counts */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
unsigned tmpl;
tmpl = level;
level = lvl2;
lvl2 = tmpl;
}
/* Compute the sethi number for this binary operator */
if (level < 1)
{
level = lvl2;
}
else if (level == lvl2)
{
level += 1;
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
unsigned lvl2; // Scratch variable
case GT_CALL:
assert(tree->gtFlags & GTF_CALL);
level = 0;
costEx = 5;
costSz = 2;
GenTreeCall* call;
call = tree->AsCall();
/* Evaluate the 'this' argument, if present */
if (tree->AsCall()->gtCallThisArg != nullptr)
{
GenTree* thisVal = tree->AsCall()->gtCallThisArg->GetNode();
lvl2 = gtSetEvalOrder(thisVal);
if (level < lvl2)
{
level = lvl2;
}
costEx += thisVal->GetCostEx();
costSz += thisVal->GetCostSz() + 1;
}
/* Evaluate the arguments, right to left */
if (call->gtCallArgs != nullptr)
{
const bool lateArgs = false;
lvl2 = gtSetCallArgsOrder(call->Args(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
/* Evaluate the temp register arguments list
* This is a "hidden" list and its only purpose is to
* extend the life of temps until we make the call */
if (call->gtCallLateArgs != nullptr)
{
const bool lateArgs = true;
lvl2 = gtSetCallArgsOrder(call->LateArgs(), lateArgs, &costEx, &costSz);
if (level < lvl2)
{
level = lvl2;
}
}
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
lvl2 = gtSetEvalOrder(indirect);
if (level < lvl2)
{
level = lvl2;
}
costEx += indirect->GetCostEx() + IND_COST_EX;
costSz += indirect->GetCostSz();
}
else
{
if (call->IsVirtual())
{
GenTree* controlExpr = call->gtControlExpr;
if (controlExpr != nullptr)
{
lvl2 = gtSetEvalOrder(controlExpr);
if (level < lvl2)
{
level = lvl2;
}
costEx += controlExpr->GetCostEx();
costSz += controlExpr->GetCostSz();
}
}
#ifdef TARGET_ARM
if (call->IsVirtualStub())
{
// We generate movw/movt/ldr
costEx += (1 + IND_COST_EX);
costSz += 8;
if (call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT)
{
// Must use R12 for the ldr target -- REG_JUMP_THUNK_PARAM
costSz += 2;
}
}
else if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT))
{
costEx += 2;
costSz += 6;
}
costSz += 2;
#endif
#ifdef TARGET_XARCH
costSz += 3;
#endif
}
level += 1;
/* Virtual calls are a bit more expensive */
if (call->IsVirtual())
{
costEx += 2 * IND_COST_EX;
costSz += 2;
}
level += 5;
costEx += 3 * IND_COST_EX;
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
return gtSetMultiOpOrder(tree->AsMultiOp());
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
level = gtSetEvalOrder(arrElem->gtArrObj);
costEx = arrElem->gtArrObj->GetCostEx();
costSz = arrElem->gtArrObj->GetCostSz();
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
lvl2 = gtSetEvalOrder(arrElem->gtArrInds[dim]);
if (level < lvl2)
{
level = lvl2;
}
costEx += arrElem->gtArrInds[dim]->GetCostEx();
costSz += arrElem->gtArrInds[dim]->GetCostSz();
}
level += arrElem->gtArrRank;
costEx += 2 + (arrElem->gtArrRank * (IND_COST_EX + 1));
costSz += 2 + (arrElem->gtArrRank * 2);
}
break;
case GT_ARR_OFFSET:
level = gtSetEvalOrder(tree->AsArrOffs()->gtOffset);
costEx = tree->AsArrOffs()->gtOffset->GetCostEx();
costSz = tree->AsArrOffs()->gtOffset->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtIndex);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtIndex->GetCostEx();
costSz += tree->AsArrOffs()->gtIndex->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsArrOffs()->gtArrObj);
level = max(level, lvl2);
costEx += tree->AsArrOffs()->gtArrObj->GetCostEx();
costSz += tree->AsArrOffs()->gtArrObj->GetCostSz();
break;
case GT_PHI:
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
lvl2 = gtSetEvalOrder(use.GetNode());
// PHI args should always have cost 0 and level 0
assert(lvl2 == 0);
assert(use.GetNode()->GetCostEx() == 0);
assert(use.GetNode()->GetCostSz() == 0);
}
// Give it a level of 2, just to be sure that it's greater than the LHS of
// the parent assignment and the PHI gets evaluated first in linear order.
// See also SsaBuilder::InsertPhi and SsaBuilder::AddPhiArg.
level = 2;
costEx = 0;
costSz = 0;
break;
case GT_FIELD_LIST:
level = 0;
costEx = 0;
costSz = 0;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
unsigned opLevel = gtSetEvalOrder(use.GetNode());
level = max(level, opLevel);
gtSetEvalOrder(use.GetNode());
costEx += use.GetNode()->GetCostEx();
costSz += use.GetNode()->GetCostSz();
}
break;
case GT_CMPXCHG:
level = gtSetEvalOrder(tree->AsCmpXchg()->gtOpLocation);
costSz = tree->AsCmpXchg()->gtOpLocation->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpValue);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpValue->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsCmpXchg()->gtOpComparand);
if (level < lvl2)
{
level = lvl2;
}
costSz += tree->AsCmpXchg()->gtOpComparand->GetCostSz();
costEx = MAX_COST; // Seriously, what could be more expensive than lock cmpxchg?
costSz += 5; // size of lock cmpxchg [reg+C], reg
break;
case GT_STORE_DYN_BLK:
level = gtSetEvalOrder(tree->AsStoreDynBlk()->Addr());
costEx = tree->AsStoreDynBlk()->Addr()->GetCostEx();
costSz = tree->AsStoreDynBlk()->Addr()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->Data());
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->Data()->GetCostEx();
costSz += tree->AsStoreDynBlk()->Data()->GetCostSz();
lvl2 = gtSetEvalOrder(tree->AsStoreDynBlk()->gtDynamicSize);
level = max(level, lvl2);
costEx += tree->AsStoreDynBlk()->gtDynamicSize->GetCostEx();
costSz += tree->AsStoreDynBlk()->gtDynamicSize->GetCostSz();
break;
default:
JITDUMP("unexpected operator in this tree:\n");
DISPTREE(tree);
NO_WAY("unexpected operator");
}
DONE:
// Some path through this function must have set the costs.
assert(costEx != -1);
assert(costSz != -1);
tree->SetCosts(costEx, costSz);
return level;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
#ifdef DEBUG
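//------------------------------------------------------------------------
// OperSupportsReverseOpEvalOrder: Check whether this node supports swapping the
//    evaluation order of its operands via GTF_REVERSE_OPS.
//
// Arguments:
//    comp - the compiler instance
//
// Return Value:
//    True if this is a binary node (or a two-operand multi-op node) for which
//    reversed operand evaluation order is supported.
//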
bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
{
if (OperIsBinary())
{
if ((AsOp()->gtGetOp1() == nullptr) || (AsOp()->gtGetOp2() == nullptr))
{
return false;
}
if (OperIs(GT_COMMA, GT_BOUNDS_CHECK))
{
return false;
}
if (OperIs(GT_INTRINSIC))
{
return !comp->IsIntrinsicImplementedByUserCall(AsIntrinsic()->gtIntrinsicName);
}
return true;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
if (OperIsMultiOp())
{
return AsMultiOp()->GetOperandCount() == 2;
}
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
return false;
}
#endif // DEBUG
/*****************************************************************************
*
* If the given tree is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0. Note that we never return 1,
* to match the behavior of GetScaleIndexShf().
*/
unsigned GenTree::GetScaleIndexMul()
{
if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
{
return (unsigned)AsIntConCommon()->IconValue();
}
return 0;
}
/*****************************************************************************
*
* If the given tree is the right-hand side of a left shift (that is,
* 'y' in the tree 'x' << 'y'), and it is an integer constant that can be used
* in a scaled index address mode as a multiplier (e.g. "[4*index]"), then return
* the scale factor: 2, 4, or 8. Otherwise, return 0.
*/
unsigned GenTree::GetScaleIndexShf()
{
if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
{
return (unsigned)(1 << AsIntConCommon()->IconValue());
}
return 0;
}
/*****************************************************************************
*
* If the given tree is a scaled index (i.e. "op * 4" or "op << 2"), returns
* the multiplier: 2, 4, or 8; otherwise returns 0. Note that "1" is never
* returned.
*/
unsigned GenTree::GetScaledIndex()
{
// With !opts.OptEnabled(CLFLG_CONSTANTFOLD) we can have
// CNS_INT * CNS_INT
//
if (AsOp()->gtOp1->IsCnsIntOrI())
{
return 0;
}
switch (gtOper)
{
case GT_MUL:
return AsOp()->gtOp2->GetScaleIndexMul();
case GT_LSH:
return AsOp()->gtOp2->GetScaleIndexShf();
default:
assert(!"GenTree::GetScaledIndex() called with illegal gtOper");
break;
}
return 0;
}
//------------------------------------------------------------------------
// TryGetUse: Get the use edge for an operand of this tree.
//
// Arguments:
// operand - the node to find the use for
// pUse - [out] parameter for the use
//
// Return Value:
// Whether "operand" is a child of this node. If it is, "*pUse" is set,
// allowing for the replacement of "operand" with some other node.
//
bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
switch (OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
return false;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_RETFILT:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
// Variadic nodes
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
if (this->AsUnOp()->gtOp1->gtOper == GT_FIELD_LIST)
{
return this->AsUnOp()->gtOp1->TryGetUse(operand, pUse);
}
if (operand == this->AsUnOp()->gtOp1)
{
*pUse = &this->AsUnOp()->gtOp1;
return true;
}
return false;
#endif // FEATURE_ARG_SPLIT
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
for (GenTree** opUse : this->AsMultiOp()->UseEdges())
{
if (*opUse == operand)
{
*pUse = opUse;
return true;
}
}
return false;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// Special nodes
case GT_PHI:
for (GenTreePhi::Use& phiUse : AsPhi()->Uses())
{
if (phiUse.GetNode() == operand)
{
*pUse = &phiUse.NodeRef();
return true;
}
}
return false;
case GT_FIELD_LIST:
for (GenTreeFieldList::Use& fieldUse : AsFieldList()->Uses())
{
if (fieldUse.GetNode() == operand)
{
*pUse = &fieldUse.NodeRef();
return true;
}
}
return false;
case GT_CMPXCHG:
{
GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg();
if (operand == cmpXchg->gtOpLocation)
{
*pUse = &cmpXchg->gtOpLocation;
return true;
}
if (operand == cmpXchg->gtOpValue)
{
*pUse = &cmpXchg->gtOpValue;
return true;
}
if (operand == cmpXchg->gtOpComparand)
{
*pUse = &cmpXchg->gtOpComparand;
return true;
}
return false;
}
case GT_ARR_ELEM:
{
GenTreeArrElem* const arrElem = this->AsArrElem();
if (operand == arrElem->gtArrObj)
{
*pUse = &arrElem->gtArrObj;
return true;
}
for (unsigned i = 0; i < arrElem->gtArrRank; i++)
{
if (operand == arrElem->gtArrInds[i])
{
*pUse = &arrElem->gtArrInds[i];
return true;
}
}
return false;
}
case GT_ARR_OFFSET:
{
GenTreeArrOffs* const arrOffs = this->AsArrOffs();
if (operand == arrOffs->gtOffset)
{
*pUse = &arrOffs->gtOffset;
return true;
}
if (operand == arrOffs->gtIndex)
{
*pUse = &arrOffs->gtIndex;
return true;
}
if (operand == arrOffs->gtArrObj)
{
*pUse = &arrOffs->gtArrObj;
return true;
}
return false;
}
case GT_STORE_DYN_BLK:
{
GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk();
if (operand == dynBlock->gtOp1)
{
*pUse = &dynBlock->gtOp1;
return true;
}
if (operand == dynBlock->gtOp2)
{
*pUse = &dynBlock->gtOp2;
return true;
}
if (operand == dynBlock->gtDynamicSize)
{
*pUse = &dynBlock->gtDynamicSize;
return true;
}
return false;
}
case GT_CALL:
{
GenTreeCall* const call = this->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
*pUse = &call->gtCallThisArg->NodeRef();
return true;
}
if (operand == call->gtControlExpr)
{
*pUse = &call->gtControlExpr;
return true;
}
if (call->gtCallType == CT_INDIRECT)
{
if (operand == call->gtCallCookie)
{
*pUse = &call->gtCallCookie;
return true;
}
if (operand == call->gtCallAddr)
{
*pUse = &call->gtCallAddr;
return true;
}
}
for (GenTreeCall::Use& argUse : call->Args())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
for (GenTreeCall::Use& argUse : call->LateArgs())
{
if (argUse.GetNode() == operand)
{
*pUse = &argUse.NodeRef();
return true;
}
}
return false;
}
// Binary nodes
default:
assert(this->OperIsBinary());
return TryGetUseBinOp(operand, pUse);
}
}
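//------------------------------------------------------------------------
// TryGetUseBinOp: Get the use edge for an operand of a binary node.
//
// Arguments:
//    operand - the node to find the use for
//    pUse    - [out] parameter for the use
//
// Return Value:
//    Whether "operand" is op1 or op2 of this node. If it is, "*pUse" is set
//    to the corresponding use edge.
//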
bool GenTree::TryGetUseBinOp(GenTree* operand, GenTree*** pUse)
{
assert(operand != nullptr);
assert(pUse != nullptr);
assert(this->OperIsBinary());
GenTreeOp* const binOp = this->AsOp();
if (operand == binOp->gtOp1)
{
*pUse = &binOp->gtOp1;
return true;
}
if (operand == binOp->gtOp2)
{
*pUse = &binOp->gtOp2;
return true;
}
return false;
}
//------------------------------------------------------------------------
// GenTree::ReplaceOperand:
// Replace a given operand to this node with a new operand. If the
// current node is a call node, this will also update the call
// argument table if necessary.
//
// Arguments:
// useEdge - the use edge that points to the operand to be replaced.
// replacement - the replacement node.
//
void GenTree::ReplaceOperand(GenTree** useEdge, GenTree* replacement)
{
assert(useEdge != nullptr);
assert(replacement != nullptr);
assert(TryGetUse(*useEdge, &useEdge));
if (OperGet() == GT_CALL)
{
AsCall()->ReplaceCallOperand(useEdge, replacement);
}
else
{
*useEdge = replacement;
}
}
//------------------------------------------------------------------------
// gtGetParent: Get the parent of this node, and optionally capture the
// pointer to the child so that it can be modified.
//
// Arguments:
// pUse - A pointer to a GenTree** (yes, that's three
// levels, i.e. GenTree ***), which if non-null,
// will be set to point to the field in the parent
// that points to this node.
//
// Return value
// The parent of this node.
//
// Notes:
// This requires that the execution order must be defined (i.e. gtSetEvalOrder() has been called).
// To enable the child to be replaced, it accepts an argument, "pUse", that, if non-null,
// will be set to point to the child pointer in the parent that points to this node.
//
GenTree* GenTree::gtGetParent(GenTree*** pUse)
{
// Find the parent node; it must be after this node in the execution order.
GenTree* user;
GenTree** use = nullptr;
for (user = gtNext; user != nullptr; user = user->gtNext)
{
if (user->TryGetUse(this, &use))
{
break;
}
}
if (pUse != nullptr)
{
*pUse = use;
}
return user;
}
//-------------------------------------------------------------------------
// gtRetExprVal - walk back through GT_RET_EXPRs
//
// Arguments:
// pbbFlags - out-parameter that is set to the flags of the basic block
// containing the inlinee return value. The value is 0
// for unsuccessful inlines.
//
// Returns:
// tree representing return value from a successful inline,
// or original call for failed or yet to be determined inline.
//
// Notes:
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
//
GenTree* GenTree::gtRetExprVal(BasicBlockFlags* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
BasicBlockFlags bbFlags = BBF_EMPTY;
assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
while (retExprVal->OperIs(GT_RET_EXPR))
{
const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
bbFlags = retExpr->bbFlags;
retExprVal = retExpr->gtInlineCandidate;
}
if (pbbFlags != nullptr)
{
*pbbFlags = bbFlags;
}
return retExprVal;
}
//------------------------------------------------------------------------------
// OperRequiresAsgFlag : Check whether the operation requires GTF_ASG flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresAsgFlag()
{
if (OperIs(GT_ASG, GT_STORE_DYN_BLK) ||
OperIs(GT_XADD, GT_XORR, GT_XAND, GT_XCHG, GT_LOCKADD, GT_CMPXCHG, GT_MEMORYBARRIER))
{
return true;
}
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryStore())
{
// A MemoryStore operation is an assignment
return true;
}
}
#endif // FEATURE_HW_INTRINSICS
if (gtOper == GT_CALL)
{
// If the call has a return buffer argument, it produces a definition and hence
// should be marked with the assignment flag.
return AsCall()->GetLclRetBufArgNode() != nullptr;
}
return false;
}
//------------------------------------------------------------------------------
// OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless
// of the children's flags.
//
bool GenTree::OperRequiresCallFlag(Compiler* comp)
{
switch (gtOper)
{
case GT_CALL:
return true;
case GT_KEEPALIVE:
return true;
case GT_INTRINSIC:
return comp->IsIntrinsicImplementedByUserCall(this->AsIntrinsic()->gtIntrinsicName);
#if FEATURE_FIXED_OUT_ARGS && !defined(TARGET_64BIT)
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// Variable shifts of a long end up being helper calls, so mark the tree as such in morph.
// This is potentially too conservative, since they'll get treated as having side effects.
// It is important to mark them as calls so if they are part of an argument list,
// they will get sorted and processed properly (for example, it is important to handle
// all nested calls before putting struct arguments in the argument registers). We
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperIsImplicitIndir : Check whether the operation contains an implicit
// indirection.
// Arguments:
// this - a GenTree node
//
// Return Value:
// True if the given node contains an implicit indirection
//
// Note that for the [HW]INTRINSIC nodes we have to examine the
// details of the node to determine its result.
//
bool GenTree::OperIsImplicitIndir() const
{
switch (gtOper)
{
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_BOX:
case GT_ARR_INDEX:
case GT_ARR_ELEM:
case GT_ARR_OFFSET:
return true;
case GT_INTRINSIC:
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
#ifdef FEATURE_SIMD
case GT_SIMD:
{
return AsSIMD()->OperIsMemoryLoad();
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
return AsHWIntrinsic()->OperIsMemoryLoadOrStore();
}
#endif // FEATURE_HW_INTRINSICS
default:
return false;
}
}
//------------------------------------------------------------------------------
// OperMayThrow : Check whether the operation may throw.
//
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if the given operator may cause an exception
bool GenTree::OperMayThrow(Compiler* comp)
{
GenTree* op;
switch (gtOper)
{
case GT_MOD:
case GT_DIV:
case GT_UMOD:
case GT_UDIV:
/* Division with a non-zero, non-minus-one constant does not throw an exception */
op = AsOp()->gtOp2;
if (varTypeIsFloating(op->TypeGet()))
{
return false; // Floating point division does not throw.
}
// For integers only division by 0 or by -1 can throw
if (op->IsIntegralConst() && !op->IsIntegralConst(0) && !op->IsIntegralConst(-1))
{
return false;
}
return true;
case GT_INTRINSIC:
// If this is an intrinsic that represents the object.GetType(), it can throw an NullReferenceException.
// Currently, this is the only intrinsic that can throw an exception.
return AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType;
case GT_CALL:
CorInfoHelpFunc helper;
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
case GT_IND:
case GT_BLK:
case GT_OBJ:
case GT_NULLCHECK:
case GT_STORE_BLK:
case GT_STORE_DYN_BLK:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) && comp->fgAddrCouldBeNull(this->AsIndir()->Addr()));
case GT_ARR_LENGTH:
return (((this->gtFlags & GTF_IND_NONFAULTING) == 0) &&
comp->fgAddrCouldBeNull(this->AsArrLen()->ArrRef()));
case GT_ARR_ELEM:
return comp->fgAddrCouldBeNull(this->AsArrElem()->gtArrObj);
case GT_FIELD:
{
GenTree* fldObj = this->AsField()->GetFldObj();
if (fldObj != nullptr)
{
return comp->fgAddrCouldBeNull(fldObj);
}
return false;
}
case GT_BOUNDS_CHECK:
case GT_ARR_INDEX:
case GT_ARR_OFFSET:
case GT_LCLHEAP:
case GT_CKFINITE:
case GT_INDEX_ADDR:
return true;
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
assert(hwIntrinsicNode != nullptr);
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
{
// This operation contains an implicit indirection
// it could throw a null reference exception.
//
return true;
}
break;
}
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
/* Overflow arithmetic operations also throw exceptions */
if (gtOverflowEx())
{
return true;
}
return false;
}
//-----------------------------------------------------------------------------------
// GetFieldCount: Return the register count for a multi-reg lclVar.
//
// Arguments:
// compiler - the current Compiler instance.
//
// Return Value:
// Returns the number of registers defined by this node.
//
// Notes:
// This must be a multireg lclVar.
//
unsigned int GenTreeLclVar::GetFieldCount(Compiler* compiler) const
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
return varDsc->lvFieldCnt;
}
//-----------------------------------------------------------------------------------
// GetFieldTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// compiler - the current Compiler instance.
// idx - which register type to return.
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg lclVar and 'regIndex' must be a valid index for this node.
//
var_types GenTreeLclVar::GetFieldTypeByIndex(Compiler* compiler, unsigned idx)
{
assert(IsMultiReg());
LclVarDsc* varDsc = compiler->lvaGetDesc(GetLclNum());
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varDsc->lvFieldLclStart + idx);
assert(fieldVarDsc->TypeGet() != TYP_STRUCT); // Don't expect struct fields.
return fieldVarDsc->TypeGet();
}
#if DEBUGGABLE_GENTREE
// static
GenTree::VtablePtr GenTree::s_vtablesForOpers[] = {nullptr};
GenTree::VtablePtr GenTree::s_vtableForOp = nullptr;
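//------------------------------------------------------------------------
// GetVtableForOper: Get the vtable pointer for the most-derived GenTree subtype
//    used to represent the given oper, computing and caching it on first use.
//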
GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
{
noway_assert(oper < GT_COUNT);
// First, check a cache.
if (s_vtablesForOpers[oper] != nullptr)
{
return s_vtablesForOpers[oper];
}
// Otherwise, look up the correct vtable entry. Note that we want the most derived GenTree subtype
// for an oper. E.g., GT_LCL_VAR is defined in GTSTRUCT_3 as GenTreeLclVar and in GTSTRUCT_N as
// GenTreeLclVarCommon. We want the GenTreeLclVar vtable, since nothing should actually be
// instantiated as a GenTreeLclVarCommon.
VtablePtr res = nullptr;
switch (oper)
{
// clang-format off
#define GTSTRUCT_0(nm, tag) /*handle explicitly*/
#define GTSTRUCT_1(nm, tag) \
case tag: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_2(nm, tag, tag2) \
case tag: \
case tag2: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_3(nm, tag, tag2, tag3) \
case tag: \
case tag2: \
case tag3: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_4(nm, tag, tag2, tag3, tag4) \
case tag: \
case tag2: \
case tag3: \
case tag4: \
{ \
GenTree##nm gt; \
res = *reinterpret_cast<VtablePtr*>(>); \
} \
break;
#define GTSTRUCT_N(nm, ...) /*handle explicitly*/
#define GTSTRUCT_2_SPECIAL(nm, tag, tag2) /*handle explicitly*/
#define GTSTRUCT_3_SPECIAL(nm, tag, tag2, tag3) /*handle explicitly*/
#include "gtstructs.h"
// clang-format on
// Handle the special cases.
// The following opers are in GTSTRUCT_N but no other place (namely, no subtypes).
case GT_STORE_BLK:
case GT_BLK:
{
GenTreeBlk gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
case GT_IND:
case GT_NULLCHECK:
{
GenTreeIndir gt;
res = *reinterpret_cast<VtablePtr*>(>);
}
break;
// We don't need to handle GTSTRUCT_N for LclVarCommon, since all those allowed opers are specified
// in their proper subtype. Similarly for GenTreeIndir.
default:
{
// Should be unary or binary op.
if (s_vtableForOp == nullptr)
{
unsigned opKind = OperKind(oper);
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
GenTreeIntCon dummyOp(TYP_INT, 0);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(>);
}
res = s_vtableForOp;
break;
}
}
s_vtablesForOpers[oper] = res;
return res;
}
void GenTree::SetVtableForOper(genTreeOps oper)
{
*reinterpret_cast<VtablePtr*>(this) = GetVtableForOper(oper);
}
#endif // DEBUGGABLE_GENTREE
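//------------------------------------------------------------------------
// gtNewOperNode: Allocate a new binary operator node.
//
// Arguments:
//    oper - the operator (must not be an oper that extends GenTreeOp)
//    type - the type of the node
//    op1  - the first operand
//    op2  - the second operand
//
// Return Value:
//    The new GenTreeOp node.
//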
GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
assert(op1 != nullptr);
assert(op2 != nullptr);
// We should not be allocating nodes that extend GenTreeOp with this;
// should call the appropriate constructor for the extended type.
assert(!GenTree::IsExOp(GenTree::OperKind(oper)));
GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, op2);
return node;
}
GenTreeColon* Compiler::gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode)
{
return new (this, GT_COLON) GenTreeColon(type, elseNode, thenNode);
}
GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon)
{
compQmarkUsed = true;
GenTreeQmark* result = new (this, GT_QMARK) GenTreeQmark(type, cond, colon);
#ifdef DEBUG
if (compQmarkRationalized)
{
fgCheckQmarkAllowedForm(result);
}
#endif
return result;
}
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
GenTreeIntCon* Compiler::gtNewNull()
{
return gtNewIconNode(0, TYP_REF);
}
GenTreeIntCon* Compiler::gtNewTrue()
{
return gtNewIconNode(1, TYP_INT);
}
GenTreeIntCon* Compiler::gtNewFalse()
{
return gtNewIconNode(0, TYP_INT);
}
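//------------------------------------------------------------------------
// gtNewIconNode: Create a TYP_I_IMPL integer constant node for a field offset.
//
// Arguments:
//    fieldOffset - the field offset value
//    fieldSeq    - the field sequence for the offset (NotAField if null)
//
// Return Value:
//    The new constant node, annotated with the field sequence.
//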
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
{
GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
return node;
}
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
GenTree* Compiler::gtNewJmpTableNode()
{
return new (this, GT_JMPTABLE) GenTree(GT_JMPTABLE, TYP_I_IMPL);
}
/*****************************************************************************
*
* Converts an annotated token into icon flags (so that we will later be
* able to tell the type of the handle that will be embedded in the icon
* node)
*/
GenTreeFlags Compiler::gtTokenToIconFlags(unsigned token)
{
GenTreeFlags flags = GTF_EMPTY;
switch (TypeFromToken(token))
{
case mdtTypeRef:
case mdtTypeDef:
case mdtTypeSpec:
flags = GTF_ICON_CLASS_HDL;
break;
case mdtMethodDef:
flags = GTF_ICON_METHOD_HDL;
break;
case mdtFieldDef:
flags = GTF_ICON_FIELD_HDL;
break;
default:
flags = GTF_ICON_TOKEN_HDL;
break;
}
return flags;
}
//-----------------------------------------------------------------------------------------
// gtNewIndOfIconHandleNode: Creates an indirection GenTree node of a constant handle
//
// Arguments:
// indType - The type returned by the indirection node
// addr - The constant address to read from
// iconFlags - The GTF_ICON flag value that specifies the kind of handle that we have
// isInvariant - The indNode should also be marked as invariant
//
// Return Value:
// Returns a GT_IND node representing the value at the address provided by 'addr'
//
// Notes:
// The GT_IND node is marked as non-faulting
// If the indirection is not invariant, we also mark the indNode as GTF_GLOB_REF
//
GenTree* Compiler::gtNewIndOfIconHandleNode(var_types indType, size_t addr, GenTreeFlags iconFlags, bool isInvariant)
{
GenTree* addrNode = gtNewIconHandleNode(addr, iconFlags);
GenTree* indNode = gtNewOperNode(GT_IND, indType, addrNode);
// This indirection won't cause an exception.
//
indNode->gtFlags |= GTF_IND_NONFAULTING;
if (isInvariant)
{
assert(iconFlags != GTF_ICON_STATIC_HDL); // Pointer to a mutable class Static variable
assert(iconFlags != GTF_ICON_BBC_PTR); // Pointer to a mutable basic block count value
assert(iconFlags != GTF_ICON_GLOBAL_PTR); // Pointer to mutable data from the VM state
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
if (iconFlags == GTF_ICON_STR_HDL)
{
// String literals are never null
indNode->gtFlags |= GTF_IND_NONNULL;
}
}
else
{
// GLOB_REF needs to be set for indirections returning values from mutable
// locations, so that e. g. args sorting does not reorder them with calls.
indNode->gtFlags |= GTF_GLOB_REF;
}
return indNode;
}
/*****************************************************************************
*
* Allocates an integer constant entry that represents a HANDLE to something.
* It may not be allowed to embed HANDLEs directly into the JITed code (e.g.,
* as arguments to JIT helpers). Get a corresponding value that can be embedded.
* If the handle needs to be accessed via an indirection, pValue points to it.
*/
GenTree* Compiler::gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags iconFlags, void* compileTimeHandle)
{
GenTree* iconNode;
GenTree* handleNode;
if (value != nullptr)
{
// When 'value' is non-null, pValue is required to be null
assert(pValue == nullptr);
// use 'value' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)value, iconFlags);
// 'value' is the handle
handleNode = iconNode;
}
else
{
// When 'value' is null, pValue is required to be non-null
assert(pValue != nullptr);
// use 'pValue' to construct an integer constant node
iconNode = gtNewIconHandleNode((size_t)pValue, iconFlags);
// 'pValue' is an address of a location that contains the handle
// construct the indirection of 'pValue'
handleNode = gtNewOperNode(GT_IND, TYP_I_IMPL, iconNode);
// This indirection won't cause an exception.
handleNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
handleNode->gtFlags |= GTF_IND_INVARIANT;
}
iconNode->AsIntCon()->gtCompileTimeHandle = (size_t)compileTimeHandle;
return handleNode;
}
/*****************************************************************************/
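//------------------------------------------------------------------------
// gtNewStringLiteralNode: Create a tree that produces a string literal object.
//
// Arguments:
//    iat    - how the string's address is accessed (direct value, single or
//             double indirection)
//    pValue - the handle or address provided by the runtime
//
// Return Value:
//    A TYP_REF tree that evaluates to the string object.
//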
GenTree* Compiler::gtNewStringLiteralNode(InfoAccessType iat, void* pValue)
{
GenTree* tree = nullptr;
switch (iat)
{
case IAT_VALUE:
setMethodHasFrozenString();
tree = gtNewIconEmbHndNode(pValue, nullptr, GTF_ICON_STR_HDL, nullptr);
tree->gtType = TYP_REF;
#ifdef DEBUG
tree->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PVALUE: // The value needs to be accessed via an indirection
// Create an indirection
tree = gtNewIndOfIconHandleNode(TYP_REF, (size_t)pValue, GTF_ICON_STR_HDL, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
break;
case IAT_PPVALUE: // The value needs to be accessed via a double indirection
// Create the first indirection
tree = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pValue, GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
tree->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)pValue;
#endif
// Create the second indirection
tree = gtNewOperNode(GT_IND, TYP_REF, tree);
// This indirection won't cause an exception.
tree->gtFlags |= GTF_IND_NONFAULTING;
// This indirection points into the global heap (it is a String object)
tree->gtFlags |= GTF_GLOB_REF;
break;
default:
noway_assert(!"Unexpected InfoAccessType");
}
return tree;
}
//------------------------------------------------------------------------
// gtNewStringLiteralLength: create GenTreeIntCon node for the given string
// literal to store its length.
//
// Arguments:
// node - string literal node.
//
// Return Value:
// GenTreeIntCon node with string's length as a value or null.
//
GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
{
if (node->IsStringEmptyField())
{
JITDUMP("Folded String.Empty.Length to 0\n");
return gtNewIconNode(0);
}
int length = -1;
const char16_t* str = info.compCompHnd->getStringLiteral(node->gtScpHnd, node->gtSconCPX, &length);
if (length >= 0)
{
GenTreeIntCon* iconNode = gtNewIconNode(length);
// str can be NULL for dynamic context
if (str != nullptr)
{
JITDUMP("Folded '\"%ws\".Length' to '%d'\n", str, length)
}
else
{
JITDUMP("Folded 'CNS_STR.Length' to '%d'\n", length)
}
return iconNode;
}
return nullptr;
}
/*****************************************************************************/
GenTree* Compiler::gtNewLconNode(__int64 value)
{
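// On 64-bit targets a long constant fits in a GT_CNS_INT node; 32-bit targets
// represent it with a GT_CNS_LNG node instead.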
#ifdef TARGET_64BIT
GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
#else
GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
#endif
return node;
}
GenTree* Compiler::gtNewDconNode(double value, var_types type)
{
GenTree* node = new (this, GT_CNS_DBL) GenTreeDblCon(value, type);
return node;
}
GenTree* Compiler::gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle)
{
// 'GT_CNS_STR' nodes later get transformed into 'GT_CALL'
assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_CNS_STR]);
GenTree* node = new (this, GT_CALL) GenTreeStrCon(CPX, scpHandle DEBUGARG(/*largeNode*/ true));
return node;
}
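//------------------------------------------------------------------------
// gtNewZeroConNode: Create a constant node with the zero value of the given type.
//
// Arguments:
//    type - the type of the zero constant (int, ref, byref, long, float or double)
//
// Return Value:
//    The new constant node (asserts on unsupported types).
//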
GenTree* Compiler::gtNewZeroConNode(var_types type)
{
GenTree* zero;
switch (type)
{
case TYP_INT:
zero = gtNewIconNode(0);
break;
case TYP_BYREF:
FALLTHROUGH;
case TYP_REF:
zero = gtNewIconNode(0);
zero->gtType = type;
break;
case TYP_LONG:
zero = gtNewLconNode(0);
break;
case TYP_FLOAT:
zero = gtNewDconNode(0.0);
zero->gtType = type;
break;
case TYP_DOUBLE:
zero = gtNewDconNode(0.0);
break;
default:
noway_assert(!"Bad type in gtNewZeroConNode");
zero = nullptr;
break;
}
return zero;
}
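//------------------------------------------------------------------------
// gtNewOneConNode: Create a constant node with the value one of the given type.
//
// Arguments:
//    type - the type of the constant (integral or floating-point)
//
// Return Value:
//    The new constant node (asserts on unsupported types).
//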
GenTree* Compiler::gtNewOneConNode(var_types type)
{
GenTree* one;
switch (type)
{
case TYP_INT:
case TYP_UINT:
one = gtNewIconNode(1);
break;
case TYP_LONG:
case TYP_ULONG:
one = gtNewLconNode(1);
break;
case TYP_FLOAT:
case TYP_DOUBLE:
one = gtNewDconNode(1.0);
one->gtType = type;
break;
default:
noway_assert(!"Bad type in gtNewOneConNode");
one = nullptr;
break;
}
return one;
}
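//------------------------------------------------------------------------
// gtNewStoreLclVar: Create a GT_STORE_LCL_VAR node that stores "src" into the
//    given local.
//
// Arguments:
//    dstLclNum - number of the local being stored to
//    src       - the value being stored
//
// Return Value:
//    The new store node, typed like "src" and marked with GTF_VAR_DEF and GTF_ASG.
//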
GenTreeLclVar* Compiler::gtNewStoreLclVar(unsigned dstLclNum, GenTree* src)
{
GenTreeLclVar* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, src->TypeGet(), dstLclNum);
store->gtOp1 = src;
store->gtFlags = (src->gtFlags & GTF_COMMON_MASK);
store->gtFlags |= GTF_VAR_DEF | GTF_ASG;
return store;
}
#ifdef FEATURE_SIMD
//---------------------------------------------------------------------
// gtNewSIMDVectorZero: create a GT_SIMD node for Vector<T>.Zero
//
// Arguments:
// simdType - simd vector type
// simdBaseJitType - element type of vector
// simdSize - size of vector in bytes
GenTree* Compiler::gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize)
{
var_types simdBaseType = genActualType(JitType2PreciseVarType(simdBaseJitType));
GenTree* initVal = gtNewZeroConNode(simdBaseType);
initVal->gtType = simdBaseType;
return gtNewSIMDNode(simdType, initVal, SIMDIntrinsicInit, simdBaseJitType, simdSize);
}
#endif // FEATURE_SIMD
GenTreeCall* Compiler::gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
return gtNewCallNode(CT_INDIRECT, (CORINFO_METHOD_HANDLE)addr, type, args, di);
}
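//------------------------------------------------------------------------
// gtNewCallNode: Create a new call node.
//
// Arguments:
//    callType - the kind of call (user, helper or indirect)
//    callHnd  - the method handle (reinterpreted as the call address for CT_INDIRECT)
//    type     - the return type of the call
//    args     - the early argument list
//    di       - debug info for the call site
//
// Return Value:
//    The new call node with its flags and side tables initialized.
//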
GenTreeCall* Compiler::gtNewCallNode(
gtCallTypes callType, CORINFO_METHOD_HANDLE callHnd, var_types type, GenTreeCall::Use* args, const DebugInfo& di)
{
GenTreeCall* node = new (this, GT_CALL) GenTreeCall(genActualType(type));
node->gtFlags |= (GTF_CALL | GTF_GLOB_REF);
#ifdef UNIX_X86_ABI
if (callType == CT_INDIRECT || callType == CT_HELPER)
node->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
node->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
node->gtCallType = callType;
node->gtCallMethHnd = callHnd;
node->gtCallArgs = args;
node->gtCallThisArg = nullptr;
node->fgArgInfo = nullptr;
INDEBUG(node->callSig = nullptr;)
node->tailCallInfo = nullptr;
node->gtRetClsHnd = nullptr;
node->gtControlExpr = nullptr;
node->gtCallMoreFlags = GTF_CALL_M_EMPTY;
if (callType == CT_INDIRECT)
{
node->gtCallCookie = nullptr;
}
else
{
node->gtInlineCandidateInfo = nullptr;
}
node->gtCallLateArgs = nullptr;
node->gtReturnType = type;
#ifdef FEATURE_READYTORUN
node->gtEntryPoint.addr = nullptr;
node->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// These get updated after call node is built.
node->gtInlineObservation = InlineObservation::CALLEE_UNUSED_INITIAL;
node->gtRawILOffset = BAD_IL_OFFSET;
node->gtInlineContext = compInlineContext;
#endif
// Spec: Managed Retval sequence points need to be generated while generating debug info for debuggable code.
//
// Implementation note: if not generating MRV info genCallSite2ILOffsetMap will be NULL and
// codegen will pass DebugInfo() to emitter, which will cause emitter
// not to emit IP mapping entry.
if (opts.compDbgCode && opts.compDbgInfo && di.IsValid())
{
// Managed Retval - IL offset of the call. This offset is used to emit a
// CALL_INSTRUCTION type sequence point while emitting corresponding native call.
//
// TODO-Cleanup:
// a) (Opt) We need not store this offset if the method doesn't return a
// value. Rather it can be made BAD_IL_OFFSET to prevent a sequence
// point being emitted.
//
// b) (Opt) Add new sequence points only if requested by debugger through
// a new boundary type - ICorDebugInfo::BoundaryTypes
if (genCallSite2DebugInfoMap == nullptr)
{
genCallSite2DebugInfoMap = new (getAllocator()) CallSiteDebugInfoTable(getAllocator());
}
// Make sure that there are no duplicate entries for a given call node
assert(!genCallSite2DebugInfoMap->Lookup(node));
genCallSite2DebugInfoMap->Set(node, di);
}
// Initialize gtOtherRegs
node->ClearOtherRegs();
// Initialize spill flags of gtOtherRegs
node->ClearOtherRegFlags();
#if !defined(TARGET_64BIT)
if (varTypeIsLong(node))
{
assert(node->gtReturnType == node->gtType);
// Initialize Return type descriptor of call node
node->InitializeLongReturnType();
}
#endif // !defined(TARGET_64BIT)
return node;
}
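//------------------------------------------------------------------------
// gtNewLclvNode: Create a GT_LCL_VAR node that refers to the given local.
//
// Arguments:
//    lnum - the local's variable number
//    type - the type of the reference (must match the local's type, modulo the
//           exceptions asserted below)
//    offs - (debug only) the IL offset of the reference
//
// Return Value:
//    The new GT_LCL_VAR node.
//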
GenTreeLclVar* Compiler::gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
assert(type != TYP_VOID);
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
LclVarDsc* varDsc = lvaGetDesc(lnum);
bool simd12ToSimd16Widening = false;
#if FEATURE_SIMD
// We can additionally have a SIMD12 that was widened to a SIMD16, generally as part of lowering
simd12ToSimd16Widening = (type == TYP_SIMD16) && (varDsc->lvType == TYP_SIMD12);
#endif
assert((type == varDsc->lvType) || simd12ToSimd16Widening ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (varDsc->lvType == TYP_BYREF)));
}
GenTreeLclVar* node = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs));
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
return node;
}
GenTreeLclVar* Compiler::gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs))
{
// We need to ensure that all struct values are normalized.
// It might be nice to assert this in general, but we have assignments of int to long.
if (varTypeIsStruct(type))
{
// Make an exception for implicit by-ref parameters during global morph, since
// their lvType has been updated to byref but their appearances have not yet all
// been rewritten and so may have struct type still.
assert(type == lvaTable[lnum].lvType ||
(lvaIsImplicitByRefLocal(lnum) && fgGlobalMorph && (lvaTable[lnum].lvType == TYP_BYREF)));
}
// This local variable node may later get transformed into a large node
assert(GenTree::s_gtNodeSizes[LargeOpOpcode()] > GenTree::s_gtNodeSizes[GT_LCL_VAR]);
GenTreeLclVar* node =
new (this, LargeOpOpcode()) GenTreeLclVar(GT_LCL_VAR, type, lnum DEBUGARG(offs) DEBUGARG(/*largeNode*/ true));
return node;
}
GenTreeLclVar* Compiler::gtNewLclVarAddrNode(unsigned lclNum, var_types type)
{
GenTreeLclVar* node = new (this, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, type, lclNum);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, type, lclNum, lclOffs);
node->SetFieldSeq(fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq);
return node;
}
GenTreeLclFld* Compiler::gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset)
{
GenTreeLclFld* node = new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, type, lnum, offset);
/* Cannot have this assert because the inliner uses this function
* to add temporaries */
// assert(lnum < lvaCount);
node->SetFieldSeq(FieldSeqStore::NotAField());
return node;
}
GenTree* Compiler::gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags)
{
assert(GenTree::s_gtNodeSizes[GT_RET_EXPR] == TREE_NODE_SZ_LARGE);
GenTreeRetExpr* node = new (this, GT_RET_EXPR) GenTreeRetExpr(type);
node->gtInlineCandidate = inlineCandidate;
node->bbFlags = bbFlags;
if (varTypeIsStruct(inlineCandidate) && !inlineCandidate->OperIsBlkOp())
{
node->gtRetClsHnd = gtGetStructHandle(inlineCandidate);
}
// A GT_RET_EXPR node might eventually be bashed back to GT_CALL (when inlining is aborted, for example).
// Therefore it should carry the GTF_CALL flag so that all the rules about spilling can apply to it as well.
// For example, impImportLeave or CEE_POP need to spill GT_RET_EXPR before emptying the evaluation stack.
node->gtFlags |= GTF_CALL;
return node;
}
GenTreeCall::Use* Compiler::gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node, args);
}
GenTreeCall::Use* Compiler::gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after)
{
after->SetNext(new (this, CMK_ASTNode) GenTreeCall::Use(node, after->GetNext()));
return after->GetNext();
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node);
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3));
}
GenTreeCall::Use* Compiler::gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4)
{
return new (this, CMK_ASTNode) GenTreeCall::Use(node1, gtNewCallArgs(node2, node3, node4));
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching argNum and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
return argInfo->GetArgEntry(argNum);
}
/*****************************************************************************
*
* Given a GT_CALL node, access the fgArgInfo and find the entry
* that has the matching node and return the fgArgTableEntryPtr
*/
fgArgTabEntry* Compiler::gtArgEntryByNode(GenTreeCall* call, GenTree* node)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->GetNode() == node)
{
return curArgTabEntry;
}
else if (curArgTabEntry->use->GetNode() == node)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
/*****************************************************************************
*
* Find and return the entry with the given "lateArgInx". Requires that one is found
* (asserts this).
*/
fgArgTabEntry* Compiler::gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx)
{
fgArgInfo* argInfo = call->fgArgInfo;
noway_assert(argInfo != nullptr);
assert(lateArgInx != UINT_MAX);
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
fgArgTabEntry* curArgTabEntry = nullptr;
for (unsigned i = 0; i < argCount; i++)
{
curArgTabEntry = argTable[i];
if (curArgTabEntry->isLateArg() && curArgTabEntry->GetLateArgInx() == lateArgInx)
{
return curArgTabEntry;
}
}
noway_assert(!"gtArgEntryByNode: node not found");
return nullptr;
}
//------------------------------------------------------------------------
// gtArgNodeByLateArgInx: Given a call instruction, find the argument with the given
// late arg index (i.e. the given position in the gtCallLateArgs list).
// Arguments:
// call - the call node
// lateArgInx - the index into the late args list
//
// Return value:
// The late argument node.
//
GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
{
GenTree* argx = nullptr;
unsigned regIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
argx = use.GetNode();
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs;
if (regIndex == lateArgInx)
{
break;
}
regIndex++;
}
noway_assert(argx != nullptr);
return argx;
}
/*****************************************************************************
*
* Create a node that will assign 'src' to 'dst'.
*/
GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
assert(!src->TypeIs(TYP_VOID));
/* Mark the target as being assigned */
if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD))
{
dst->gtFlags |= GTF_VAR_DEF;
if (dst->IsPartialLclFld(this))
{
// We treat these partial writes as combined uses and defs.
dst->gtFlags |= GTF_VAR_USEASG;
}
}
dst->gtFlags |= GTF_DONT_CSE;
#if defined(FEATURE_SIMD) && !defined(TARGET_X86)
// TODO-CQ: x86 Windows supports multi-reg returns but not SIMD multi-reg returns
if (varTypeIsSIMD(dst->gtType))
{
// We want to track SIMD assignments as being intrinsics since they
// are functionally SIMD `mov` instructions and are more efficient
// when we don't promote, particularly when it occurs due to inlining
SetOpLclRelatedToSIMDIntrinsic(dst);
SetOpLclRelatedToSIMDIntrinsic(src);
}
#endif // FEATURE_SIMD
/* Create the assignment node */
GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
asg->gtFlags |= GTF_ASG;
return asg;
}
//------------------------------------------------------------------------
// gtNewObjNode: Creates a new Obj node.
//
// Arguments:
// structHnd - The class handle of the struct type.
// addr - The address of the struct.
//
// Return Value:
// Returns a node representing the struct value at the given address.
//
GenTreeObj* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
// An Obj is not a global reference if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
{
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
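// The address is that of a local, so the indirection cannot fault. It also does not
// reference global memory unless the local is an implicit byref parameter, whose value
// points to memory supplied by the caller.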
objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->GetLclNum()))
{
objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
return objNode;
}
//------------------------------------------------------------------------
// gtSetObjGcInfo: Set the GC info on an object node
//
// Arguments:
// objNode - The object node of interest
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
assert(varTypeIsStruct(objNode->TypeGet()));
assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
if (!objNode->GetLayout()->HasGCPtr())
{
objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
}
//------------------------------------------------------------------------
// gtNewStructVal: Return a node that represents a struct value
//
// Arguments:
// structHnd - The class for the struct
// addr - The address of the struct
//
// Return Value:
// A block, object or local node that represents the struct value pointed to by 'addr'.
GenTree* Compiler::gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
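// If 'addr' is the address of a local whose struct handle matches 'structHnd',
// return the local node directly instead of wrapping the address in an OBJ node.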
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
if (val->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = addr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = &(lvaTable[lclNum]);
if (varTypeIsStruct(varDsc) && (varDsc->GetStructHnd() == structHnd) && !lvaIsImplicitByRefLocal(lclNum))
{
return addr->gtGetOp1();
}
}
}
return gtNewObjNode(structHnd, addr);
}
//------------------------------------------------------------------------
// gtNewBlockVal: Return a node that represents a possibly untyped block value
//
// Arguments:
// addr - The address of the block
// size - The size of the block
//
// Return Value:
// A block, object or local node that represents the block value pointed to by 'addr'.
GenTree* Compiler::gtNewBlockVal(GenTree* addr, unsigned size)
{
// By default we treat this as an opaque struct type with known size.
var_types blkType = TYP_STRUCT;
if (addr->gtOper == GT_ADDR)
{
GenTree* val = addr->gtGetOp1();
#if FEATURE_SIMD
if (varTypeIsSIMD(val) && (genTypeSize(val) == size))
{
blkType = val->TypeGet();
}
#endif // FEATURE_SIMD
if (varTypeIsStruct(val) && val->OperIs(GT_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(val->AsLclVarCommon());
unsigned varSize = varTypeIsStruct(varDsc) ? varDsc->lvExactSize : genTypeSize(varDsc);
if (varSize == size)
{
return val;
}
}
}
return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
// Parameters (exactly the same as MSIL CpObj):
//
// dstAddr - The target to copy the struct to
// srcAddr - The source to copy the struct from
// structHnd - A class token that represents the type of object being copied. May be null
// if FEATURE_SIMD is enabled and the source has a SIMD type.
// isVolatile - Is this marked as volatile memory?
GenTree* Compiler::gtNewCpObjNode(GenTree* dstAddr, GenTree* srcAddr, CORINFO_CLASS_HANDLE structHnd, bool isVolatile)
{
GenTree* lhs = gtNewStructVal(structHnd, dstAddr);
GenTree* src = nullptr;
if (lhs->OperIs(GT_OBJ))
{
GenTreeObj* lhsObj = lhs->AsObj();
#if DEBUG
// Codegen for CpObj assumes that we cannot have a struct with GC pointers whose size is not a multiple
// of the register size. The EE currently does not allow this to ensure that GC pointers are aligned
// if the struct is stored in an array. Note that this restriction doesn't apply to stack-allocated objects:
// they are never stored in arrays. We should never get to this method with stack-allocated objects since they
// are never copied so we don't need to exclude them from the assert below.
// Let's assert it just to be safe.
ClassLayout* layout = lhsObj->GetLayout();
unsigned size = layout->GetSize();
assert((layout->GetGCPtrCount() == 0) || (roundUp(size, REGSIZE_BYTES) == size));
#endif
gtSetObjGcInfo(lhsObj);
}
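// For the source, use the value under an ADDR node directly if possible; otherwise
// create an indirection of the destination's type off the source address.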
if (srcAddr->OperGet() == GT_ADDR)
{
src = srcAddr->AsOp()->gtOp1;
}
else
{
src = gtNewOperNode(GT_IND, lhs->TypeGet(), srcAddr);
}
GenTree* result = gtNewBlkOpNode(lhs, src, isVolatile, true);
return result;
}
//------------------------------------------------------------------------
// FixupInitBlkValue: Fixup the init value for an initBlk operation
//
// Arguments:
// asgType - The type of assignment that the initBlk is being transformed into
//
// Return Value:
// Modifies the constant value on this node to be the appropriate "fill"
// value for the initblk.
//
// Notes:
// The initBlk MSIL instruction takes a byte value, which must be
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
unsigned size = genTypeSize(asgType);
if (size > 1)
{
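// Replicate the low byte of the fill value across all bytes of the assignment type,
// e.g. 0x3F becomes 0x3F3F for a 2-byte assignment and 0x3F3F3F3F for a 4-byte one.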
size_t cns = gtIconVal;
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
{
cns |= cns << 16;
#ifdef TARGET_64BIT
if (size == 8)
{
cns |= cns << 32;
}
#endif // TARGET_64BIT
// Make the type match for evaluation types.
gtType = asgType;
// if we are initializing a GC type the value being assigned must be zero (null).
assert(!varTypeIsGC(asgType) || (cns == 0));
}
gtIconVal = cns;
}
}
//----------------------------------------------------------------------------
// UsesDivideByConstOptimized:
// returns true if rationalize will use the division by constant
// optimization for this node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
// Return Value:
//    Return true iff the node is a GT_DIV, GT_UDIV, GT_MOD or GT_UMOD with
//    an integer constant divisor and we can perform the division operation
//    using a reciprocal multiply or a shift operation.
//
bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp)
{
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (!OperIs(GT_DIV, GT_MOD, GT_UDIV, GT_UMOD))
{
return false;
}
#if defined(TARGET_ARM64)
if (OperIs(GT_MOD, GT_UMOD))
{
// MOD, UMOD not supported for ARM64
return false;
}
#endif // TARGET_ARM64
bool isSignedDivide = OperIs(GT_DIV, GT_MOD);
GenTree* dividend = gtGetOp1()->gtEffectiveVal(/*commaOnly*/ true);
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
ssize_t divisorValue;
if (divisor->IsCnsIntOrI())
{
divisorValue = static_cast<ssize_t>(divisor->AsIntCon()->IconValue());
}
else
{
ValueNum vn = divisor->gtVNPair.GetLiberal();
if (comp->vnStore->IsVNConstant(vn))
{
divisorValue = comp->vnStore->CoercedConstantValue<ssize_t>(vn);
}
else
{
return false;
}
}
const var_types divType = TypeGet();
if (divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
return false;
}
else if (isSignedDivide)
{
if (divisorValue == -1)
{
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
return false;
}
else if (isPow2(divisorValue))
{
return true;
}
}
else // unsigned divide
{
if (divType == TYP_INT)
{
// Clear the upper 32 bits of the value; they may be set to 1 because constants
// are treated as signed and stored in ssize_t, which is 64 bits wide on 64-bit targets.
divisorValue &= UINT32_MAX;
}
size_t unsignedDivisorValue = (size_t)divisorValue;
if (isPow2(unsignedDivisorValue))
{
return true;
}
}
const bool isDiv = OperIs(GT_DIV, GT_UDIV);
if (isDiv)
{
if (isSignedDivide)
{
// If the divisor is the minimum representable integer value then the result is either 0 or 1
if ((divType == TYP_INT && divisorValue == INT_MIN) || (divType == TYP_LONG && divisorValue == INT64_MIN))
{
return true;
}
}
else
{
// If the divisor is greater than or equal to 2^(N - 1) then the result is either 0 or 1
if (((divType == TYP_INT) && ((UINT32)divisorValue > (UINT32_MAX / 2))) ||
((divType == TYP_LONG) && ((UINT64)divisorValue > (UINT64_MAX / 2))))
{
return true;
}
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide))
{
// All checks passed: we can perform the division operation using a reciprocal multiply.
return true;
}
#endif
return false;
}
//------------------------------------------------------------------------
// CheckDivideByConstOptimized:
//      Checks if we can use the division by constant optimization
//      on this node and, if so, sets the flag GTF_DIV_BY_CNS_OPT and
//      sets GTF_DONT_CSE on the constant divisor node.
//
// Arguments:
// this - a GenTreeOp node
// comp - the compiler instance
//
void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
{
if (UsesDivideByConstOptimized(comp))
{
gtFlags |= GTF_DIV_BY_CNS_OPT;
// Now set GTF_DONT_CSE on the GT_CNS_INT divisor; note that
// with value numbering we can have a non-GT_CNS_INT divisor.
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
if (divisor->OperIs(GT_CNS_INT))
{
divisor->gtFlags |= GTF_DONT_CSE;
}
}
}
//
//------------------------------------------------------------------------
// gtBlockOpInit: Initializes a BlkOp GenTree
//
// Arguments:
// result - an assignment node that is to be initialized.
// dst - the target (destination) we want to either initialize or copy to.
// src - the init value for InitBlk or the source struct for CpBlk/CpObj.
// isVolatile - specifies whether this node is a volatile memory operation.
//
// Assumptions:
// 'result' is an assignment that is newly constructed.
// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
//
// Notes:
// This procedure centralizes all the logic to both enforce proper structure and
// to properly construct any InitBlk/CpBlk node.
void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
if (!result->OperIsBlkOp())
{
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
/* In the case of CpBlk, we want to avoid generating
 * nodes where the source and destination are the same,
 * for two reasons: first, it is useless; second, it
 * introduces issues in liveness, and copying memory from
 * an overlapping memory location is undefined, both per
 * the ECMA standard and per the memcpy semantics.
 *
 * NOTE: In this case we'll only detect the case of the addr of a local
 * and the local itself; any other complex expressions won't be
 * caught.
 *
 * TODO-Cleanup: though having this logic is goodness (i.e. avoids self-assignment
 * of struct vars very early), it was added because fgInterBlockLocalVarLiveness()
 * isn't handling self-assignment of struct variables correctly. This issue may not
 * surface if struct promotion is ON (which is the case on x86/arm). But still the
 * fundamental issue exists that needs to be addressed.
 */
if (result->OperIsCopyBlkOp())
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperIsBlk() && (currSrc->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currSrc = currSrc->AsBlk()->Addr()->gtGetOp1();
}
if (currDst->OperIsBlk() && (currDst->AsBlk()->Addr()->OperGet() == GT_ADDR))
{
currDst = currDst->AsBlk()->Addr()->gtGetOp1();
}
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
// Make this a NOP
// TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
result->gtBashToNOP();
return;
}
}
// Propagate all effect flags from children
result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
{
// If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
// should be labeled as a SIMD-intrinsic-related struct.
// This is done so that the morpher can transform any field accesses into
// intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
GenTree* src = srcOrFillVal;
if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
src = src->AsIndir()->Addr()->gtGetOp1();
}
#ifdef FEATURE_HW_INTRINSICS
if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
#else
if (src->OperGet() == GT_SIMD)
#endif // FEATURE_HW_INTRINSICS
{
if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
dst = dst->AsIndir()->Addr()->gtGetOp1();
}
if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
setLclRelatedToSIMDIntrinsic(dst);
}
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
// dst - The destination node: local var / block node.
//  srcOrFillVal - The value to assign for CopyBlk, the integer "fill" for InitBlk
// isVolatile - Whether this is a volatile memory operation or not.
// isCopyBlock - True if this is a block copy (rather than a block init).
//
// Return Value:
// Returns the newly constructed and initialized block operation.
//
GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
{
assert(dst->OperIsBlk() || dst->OperIsLocal());
if (!isCopyBlock) // InitBlk
{
assert(varTypeIsIntegral(srcOrFillVal));
if (varTypeIsStruct(dst))
{
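// A non-zero fill value must be wrapped in GT_INIT_VAL so that later phases know to
// replicate the byte across the struct; a zero fill can be used as-is.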
if (!srcOrFillVal->IsIntegralConst(0))
{
srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
}
}
}
GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
//------------------------------------------------------------------------
// gtNewPutArgReg: Creates a new PutArgReg node.
//
// Arguments:
// type - The actual type of the argument
// arg - The argument node
// argReg - The register that the argument will be passed in
//
// Return Value:
// Returns the newly created PutArgReg node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/armel, GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg)
{
assert(arg != nullptr);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A PUTARG_REG could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_PUTARG_REG) GenTreeMultiRegOp(GT_PUTARG_REG, type, arg, nullptr);
if (type == TYP_LONG)
{
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
return node;
}
//------------------------------------------------------------------------
// gtNewBitCastNode: Creates a new BitCast node.
//
// Arguments:
//    type - The type that the value is to be reinterpreted as
//    arg  - The node whose value is being bitcast
//
// Return Value:
// Returns the newly created BitCast node.
//
// Notes:
// The node is generated as GenTreeMultiRegOp on RyuJIT/arm, as GenTreeOp on all the other archs.
//
GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
{
assert(arg != nullptr);
assert(type != TYP_STRUCT);
GenTree* node = nullptr;
#if defined(TARGET_ARM)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
}
//------------------------------------------------------------------------
// gtNewAllocObjNode: Helper to create an object allocation node.
//
// Arguments:
// pResolvedToken - Resolved token for the object being allocated
// useParent - true iff the token represents a child of the object's class
//
// Return Value:
// Returns GT_ALLOCOBJ node that will be later morphed into an
// allocation helper call or local variable allocation on the stack.
//
// Node creation can fail for inlinees when the type described by pResolvedToken
// can't be represented in jitted code. If this happens, this method will return
// nullptr.
//
GenTreeAllocObj* Compiler::gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent)
{
const bool mustRestoreHandle = true;
bool* const pRuntimeLookup = nullptr;
bool usingReadyToRunHelper = false;
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
GenTree* opHandle = impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, useParent);
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP lookup = {};
if (opts.IsReadyToRun())
{
helper = CORINFO_HELP_READYTORUN_NEW;
CORINFO_LOOKUP_KIND* const pGenericLookupKind = nullptr;
usingReadyToRunHelper =
info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup);
}
#endif
if (!usingReadyToRunHelper)
{
if (opHandle == nullptr)
{
// We must be backing out of an inline.
assert(compDonotInline());
return nullptr;
}
}
bool helperHasSideEffects;
CorInfoHelpFunc helperTemp =
info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd, &helperHasSideEffects);
if (!usingReadyToRunHelper)
{
helper = helperTemp;
}
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
// 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
// 3) Allocate and return the new object for boxing
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)
GenTreeAllocObj* allocObj =
gtNewAllocObjNode(helper, helperHasSideEffects, pResolvedToken->hClass, TYP_REF, opHandle);
#ifdef FEATURE_READYTORUN
if (usingReadyToRunHelper)
{
assert(lookup.addr != nullptr);
allocObj->gtEntryPoint = lookup;
}
#endif
return allocObj;
}
/*****************************************************************************
*
* Clones the given tree value and returns a copy of the given tree.
* If 'complexOK' is false, the cloning is only done provided the tree
* is not too complex (whatever that may mean);
* If 'complexOK' is true, we try slightly harder to clone the tree.
* In either case, NULL is returned if the tree cannot be cloned
*
* Note that there is the function gtCloneExpr() which does a more
* complete job if you can't handle this function failing.
*/
GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
{
GenTree* copy;
switch (tree->gtOper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = new (this, GT_CNS_INT)
GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
break;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
break;
case GT_LCL_VAR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVarCommon()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
break;
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
// Remember that the LclVar node has been cloned. The flag will be set
// on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = new (this, tree->OperGet())
GenTreeLclFld(tree->OperGet(), tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
break;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->gtType, tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
break;
default:
if (!complexOK)
{
return nullptr;
}
if (tree->gtOper == GT_FIELD)
{
GenTree* objp = nullptr;
if (tree->AsField()->GetFldObj() != nullptr)
{
objp = gtClone(tree->AsField()->GetFldObj(), false);
if (objp == nullptr)
{
return nullptr;
}
}
copy = gtNewFieldRef(tree->TypeGet(), tree->AsField()->gtFldHnd, objp, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
}
else if (tree->OperIs(GT_ADD, GT_SUB))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
if (op1->OperIsLeaf() && op2->OperIsLeaf())
{
op1 = gtClone(op1);
if (op1 == nullptr)
{
return nullptr;
}
op2 = gtClone(op2);
if (op2 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(tree->OperGet(), tree->TypeGet(), op1, op2);
}
else
{
return nullptr;
}
}
else if (tree->gtOper == GT_ADDR)
{
GenTree* op1 = gtClone(tree->AsOp()->gtOp1);
if (op1 == nullptr)
{
return nullptr;
}
copy = gtNewOperNode(GT_ADDR, tree->TypeGet(), op1);
}
else
{
return nullptr;
}
break;
}
copy->gtFlags |= tree->gtFlags & ~GTF_NODE_MASK;
#if defined(DEBUG)
copy->gtDebugFlags |= tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK;
#endif // defined(DEBUG)
return copy;
}
//------------------------------------------------------------------------
// gtCloneExpr: Create a copy of `tree`, adding flags `addFlags`, mapping
// local `varNum` to int constant `varVal` if it appears at
// the root, and mapping uses of local `deepVarNum` to constant
// `deepVarVal` if they occur beyond the root.
//
// Arguments:
// tree - GenTree to create a copy of
// addFlags - GTF_* flags to add to the copied tree nodes
// varNum - lclNum to replace at the root, or ~0 for no root replacement
// varVal - If replacing at root, replace local `varNum` with IntCns `varVal`
// deepVarNum - lclNum to replace uses of beyond the root, or ~0 for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Return Value:
// A copy of the given tree with the replacements and added flags specified.
//
// Notes:
// Top-level callers should generally call the overload that doesn't have
// the explicit `deepVarNum` and `deepVarVal` parameters; those are used in
// recursive invocations to avoid replacing defs.
GenTree* Compiler::gtCloneExpr(
GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal)
{
if (tree == nullptr)
{
return nullptr;
}
/* Figure out what kind of a node we have */
genTreeOps oper = tree->OperGet();
unsigned kind = tree->OperKind();
GenTree* copy;
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
switch (oper)
{
case GT_CNS_INT:
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
copy =
gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
case GT_CNS_LNG:
copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
goto DONE;
case GT_CNS_DBL:
copy = gtNewDconNode(tree->AsDblCon()->gtDconVal);
copy->gtType = tree->gtType; // keep the same type
goto DONE;
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
goto DONE;
case GT_LCL_VAR:
if (tree->AsLclVarCommon()->GetLclNum() == varNum)
{
copy = gtNewIconNode(varVal, tree->gtType);
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
copy->LabelIndex(this);
}
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy = gtNewLclvNode(tree->AsLclVar()->GetLclNum(),
tree->gtType DEBUGARG(tree->AsLclVar()->gtLclILoffs));
copy->AsLclVarCommon()->SetSsaNum(tree->AsLclVarCommon()->GetSsaNum());
}
goto DONE;
case GT_LCL_FLD:
if (tree->AsLclFld()->GetLclNum() == varNum)
{
IMPL_LIMITATION("replacing GT_LCL_FLD with a constant");
}
else
{
// Remember that the LclVar node has been cloned. The flag will
// be set on 'copy' as well.
tree->gtFlags |= GTF_VAR_CLONED;
copy =
new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, tree->TypeGet(), tree->AsLclFld()->GetLclNum(),
tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
copy->gtFlags = tree->gtFlags;
}
goto DONE;
case GT_CLS_VAR:
copy = new (this, GT_CLS_VAR)
GenTreeClsVar(tree->TypeGet(), tree->AsClsVar()->gtClsVarHnd, tree->AsClsVar()->gtFieldSeq);
goto DONE;
case GT_RET_EXPR:
// GT_RET_EXPR is a unique node that contains a link to a gtInlineCandidate node
// that is part of another statement. We cannot clone both here and cannot
// create another GT_RET_EXPR that points to the same gtInlineCandidate.
NO_WAY("Cloning of GT_RET_EXPR node not supported");
goto DONE;
case GT_MEMORYBARRIER:
copy = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
goto DONE;
case GT_ARGPLACE:
copy = gtNewArgPlaceHolderNode(tree->gtType, tree->AsArgPlace()->gtArgPlaceClsHnd);
goto DONE;
case GT_FTN_ADDR:
copy = new (this, oper) GenTreeFptrVal(tree->gtType, tree->AsFptrVal()->gtFptrMethod);
#ifdef FEATURE_READYTORUN
copy->AsFptrVal()->gtEntryPoint = tree->AsFptrVal()->gtEntryPoint;
#endif
goto DONE;
case GT_CATCH_ARG:
case GT_NO_OP:
case GT_LABEL:
copy = new (this, oper) GenTree(oper, tree->gtType);
goto DONE;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_JMP:
copy = new (this, oper) GenTreeVal(oper, tree->gtType, tree->AsVal()->gtVal1);
goto DONE;
case GT_LCL_VAR_ADDR:
copy = new (this, oper) GenTreeLclVar(oper, tree->TypeGet(), tree->AsLclVar()->GetLclNum());
goto DONE;
case GT_LCL_FLD_ADDR:
copy = new (this, oper)
GenTreeLclFld(oper, tree->TypeGet(), tree->AsLclFld()->GetLclNum(), tree->AsLclFld()->GetLclOffs());
copy->AsLclFld()->SetFieldSeq(tree->AsLclFld()->GetFieldSeq());
goto DONE;
default:
NO_WAY("Cloning of node not supported");
goto DONE;
}
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
/* If necessary, make sure we allocate a "fat" tree node */
CLANG_FORMAT_COMMENT_ANCHOR;
switch (oper)
{
/* These nodes sometimes get bashed to "fat" ones */
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
// In the implementation of gtNewLargeOperNode you have
// to give an oper that will create a small node,
// otherwise it asserts.
//
if (GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1,
tree->OperIsBinary() ? tree->AsOp()->gtOp2 : nullptr);
}
else // Always a large tree
{
if (tree->OperIsBinary())
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
else
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1);
}
}
break;
case GT_CAST:
copy = new (this, LargeOpOpcode())
GenTreeCast(tree->TypeGet(), tree->AsCast()->CastOp(), tree->IsUnsigned(),
tree->AsCast()->gtCastType DEBUGARG(/*largeNode*/ TRUE));
break;
case GT_INDEX:
{
GenTreeIndex* asInd = tree->AsIndex();
copy = new (this, GT_INDEX)
GenTreeIndex(asInd->TypeGet(), asInd->Arr(), asInd->Index(), asInd->gtIndElemSize);
copy->AsIndex()->gtStructElemClass = asInd->gtStructElemClass;
}
break;
case GT_INDEX_ADDR:
{
GenTreeIndexAddr* asIndAddr = tree->AsIndexAddr();
copy = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(asIndAddr->Arr(), asIndAddr->Index(), asIndAddr->gtElemType,
asIndAddr->gtStructElemClass, asIndAddr->gtElemSize, asIndAddr->gtLenOffset,
asIndAddr->gtElemOffset);
copy->AsIndexAddr()->gtIndRngFailBB = asIndAddr->gtIndRngFailBB;
}
break;
case GT_ALLOCOBJ:
{
GenTreeAllocObj* asAllocObj = tree->AsAllocObj();
copy = new (this, GT_ALLOCOBJ)
GenTreeAllocObj(tree->TypeGet(), asAllocObj->gtNewHelper, asAllocObj->gtHelperHasSideEffects,
asAllocObj->gtAllocObjClsHnd, asAllocObj->gtOp1);
#ifdef FEATURE_READYTORUN
copy->AsAllocObj()->gtEntryPoint = asAllocObj->gtEntryPoint;
#endif
}
break;
case GT_RUNTIMELOOKUP:
{
GenTreeRuntimeLookup* asRuntimeLookup = tree->AsRuntimeLookup();
copy = new (this, GT_RUNTIMELOOKUP)
GenTreeRuntimeLookup(asRuntimeLookup->gtHnd, asRuntimeLookup->gtHndType, asRuntimeLookup->gtOp1);
}
break;
case GT_ARR_LENGTH:
copy = gtNewArrLen(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsArrLen()->ArrLenOffset(), nullptr);
break;
case GT_ARR_INDEX:
copy = new (this, GT_ARR_INDEX)
GenTreeArrIndex(tree->TypeGet(),
gtCloneExpr(tree->AsArrIndex()->ArrObj(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrIndex()->IndexExpr(), addFlags, deepVarNum, deepVarVal),
tree->AsArrIndex()->gtCurrDim, tree->AsArrIndex()->gtArrRank,
tree->AsArrIndex()->gtArrElemType);
break;
case GT_QMARK:
copy = new (this, GT_QMARK)
GenTreeQmark(tree->TypeGet(), tree->AsOp()->gtGetOp1(), tree->AsOp()->gtGetOp2()->AsColon());
break;
case GT_OBJ:
copy =
new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
break;
case GT_BLK:
copy = new (this, GT_BLK)
GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
break;
case GT_FIELD:
copy = new (this, GT_FIELD) GenTreeField(tree->TypeGet(), tree->AsField()->GetFldObj(),
tree->AsField()->gtFldHnd, tree->AsField()->gtFldOffset);
copy->AsField()->gtFldMayOverlap = tree->AsField()->gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
copy->AsField()->gtFieldLookup = tree->AsField()->gtFieldLookup;
#endif
break;
case GT_BOX:
copy = new (this, GT_BOX)
GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue,
tree->AsBox()->gtCopyStmtWhenInlinedBoxValue);
break;
case GT_INTRINSIC:
copy = new (this, GT_INTRINSIC)
GenTreeIntrinsic(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2,
tree->AsIntrinsic()->gtIntrinsicName, tree->AsIntrinsic()->gtMethodHandle);
#ifdef FEATURE_READYTORUN
copy->AsIntrinsic()->gtEntryPoint = tree->AsIntrinsic()->gtEntryPoint;
#endif
break;
case GT_BOUNDS_CHECK:
copy = new (this, GT_BOUNDS_CHECK)
GenTreeBoundsChk(tree->AsBoundsChk()->GetIndex(), tree->AsBoundsChk()->GetArrayLength(),
tree->AsBoundsChk()->gtThrowKind);
copy->AsBoundsChk()->gtIndRngFailBB = tree->AsBoundsChk()->gtIndRngFailBB;
break;
case GT_LEA:
{
GenTreeAddrMode* addrModeOp = tree->AsAddrMode();
copy = new (this, GT_LEA)
GenTreeAddrMode(addrModeOp->TypeGet(), addrModeOp->Base(), addrModeOp->Index(), addrModeOp->gtScale,
static_cast<unsigned>(addrModeOp->Offset()));
}
break;
case GT_COPY:
case GT_RELOAD:
{
copy = new (this, oper) GenTreeCopyOrReload(oper, tree->TypeGet(), tree->gtGetOp1());
}
break;
default:
assert(!GenTree::IsExOp(tree->OperKind()) && tree->OperIsSimple());
// We're in the SimpleOp case, so it's always unary or binary.
if (GenTree::OperIsUnary(tree->OperGet()))
{
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, /*doSimplifications*/ false);
}
else
{
assert(GenTree::OperIsBinary(tree->OperGet()));
copy = gtNewOperNode(oper, tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
break;
}
// Some flags are conceptually part of the gtOper, and should be copied immediately.
if (tree->gtOverflowEx())
{
copy->gtFlags |= GTF_OVERFLOW;
}
if (tree->AsOp()->gtOp1)
{
if (tree->gtOper == GT_ASG)
{
// Don't replace varNum if it appears as the LHS of an assign.
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal);
}
else
{
copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal);
}
}
if (tree->gtGetOp2IfPresent())
{
copy->AsOp()->gtOp2 = gtCloneExpr(tree->AsOp()->gtOp2, addFlags, deepVarNum, deepVarVal);
}
/* Flags */
addFlags |= tree->gtFlags;
// Copy any node annotations, if necessary.
switch (tree->gtOper)
{
case GT_STOREIND:
case GT_IND:
case GT_OBJ:
case GT_STORE_OBJ:
{
ArrayInfo arrInfo;
if (!tree->AsIndir()->gtOp1->OperIs(GT_INDEX_ADDR) && TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
GetArrayInfoMap()->Set(copy, arrInfo);
}
}
break;
default:
break;
}
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
// Effects flags propagate upwards.
if (copy->AsOp()->gtOp1 != nullptr)
{
copy->gtFlags |= (copy->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT);
}
if (copy->gtGetOp2IfPresent() != nullptr)
{
copy->gtFlags |= (copy->gtGetOp2()->gtFlags & GTF_ALL_EFFECT);
}
goto DONE;
}
/* See what kind of a special operator we have here */
switch (oper)
{
case GT_CALL:
// We can't safely clone calls that have GT_RET_EXPRs via gtCloneExpr.
// You must use gtCloneCandidateCall for these calls (and then do appropriate other fixup)
if (tree->AsCall()->IsInlineCandidate() || tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
NO_WAY("Cloning of calls with associated GT_RET_EXPR nodes is not supported");
}
copy = gtCloneExprCallHelper(tree->AsCall(), addFlags, deepVarNum, deepVarVal);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
copy = new (this, GT_SIMD)
GenTreeSIMD(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsSIMD()),
tree->AsSIMD()->GetSIMDIntrinsicId(), tree->AsSIMD()->GetSimdBaseJitType(),
tree->AsSIMD()->GetSimdSize());
goto CLONE_MULTIOP_OPERANDS;
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
copy = new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(tree->TypeGet(), IntrinsicNodeBuilder(getAllocator(CMK_ASTNode), tree->AsMultiOp()),
tree->AsHWIntrinsic()->GetHWIntrinsicId(),
tree->AsHWIntrinsic()->GetSimdBaseJitType(), tree->AsHWIntrinsic()->GetSimdSize(),
tree->AsHWIntrinsic()->IsSimdAsHWIntrinsic());
copy->AsHWIntrinsic()->SetAuxiliaryJitType(tree->AsHWIntrinsic()->GetAuxiliaryJitType());
goto CLONE_MULTIOP_OPERANDS;
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
CLONE_MULTIOP_OPERANDS:
for (GenTree** use : copy->AsMultiOp()->UseEdges())
{
*use = gtCloneExpr(*use, addFlags, deepVarNum, deepVarVal);
}
break;
#endif
case GT_ARR_ELEM:
{
GenTreeArrElem* arrElem = tree->AsArrElem();
GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned dim = 0; dim < arrElem->gtArrRank; dim++)
{
inds[dim] = gtCloneExpr(arrElem->gtArrInds[dim], addFlags, deepVarNum, deepVarVal);
}
copy = new (this, GT_ARR_ELEM)
GenTreeArrElem(arrElem->TypeGet(), gtCloneExpr(arrElem->gtArrObj, addFlags, deepVarNum, deepVarVal),
arrElem->gtArrRank, arrElem->gtArrElemSize, arrElem->gtArrElemType, &inds[0]);
}
break;
case GT_ARR_OFFSET:
{
copy = new (this, GT_ARR_OFFSET)
GenTreeArrOffs(tree->TypeGet(),
gtCloneExpr(tree->AsArrOffs()->gtOffset, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtIndex, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsArrOffs()->gtArrObj, addFlags, deepVarNum, deepVarVal),
tree->AsArrOffs()->gtCurrDim, tree->AsArrOffs()->gtArrRank,
tree->AsArrOffs()->gtArrElemType);
}
break;
case GT_PHI:
{
copy = new (this, GT_PHI) GenTreePhi(tree->TypeGet());
GenTreePhi::Use** prevUse = &copy->AsPhi()->gtUses;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
*prevUse = new (this, CMK_ASTNode)
GenTreePhi::Use(gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal), *prevUse);
prevUse = &((*prevUse)->NextRef());
}
}
break;
case GT_FIELD_LIST:
copy = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
copy->AsFieldList()->AddField(this, gtCloneExpr(use.GetNode(), addFlags, deepVarNum, deepVarVal),
use.GetOffset(), use.GetType());
}
break;
case GT_CMPXCHG:
copy = new (this, GT_CMPXCHG)
GenTreeCmpXchg(tree->TypeGet(),
gtCloneExpr(tree->AsCmpXchg()->gtOpLocation, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpValue, addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsCmpXchg()->gtOpComparand, addFlags, deepVarNum, deepVarVal));
break;
case GT_STORE_DYN_BLK:
copy = new (this, oper)
GenTreeStoreDynBlk(gtCloneExpr(tree->AsStoreDynBlk()->Addr(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->Data(), addFlags, deepVarNum, deepVarVal),
gtCloneExpr(tree->AsStoreDynBlk()->gtDynamicSize, addFlags, deepVarNum, deepVarVal));
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
NO_WAY("unexpected operator");
}
DONE:
// If it has a zero-offset field seq, copy annotation.
if (tree->TypeGet() == TYP_BYREF)
{
FieldSeqNode* fldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
{
fgAddFieldSeqForZeroOffset(copy, fldSeq);
}
}
copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the original's value number pair
/* Compute the flags for the copied node. Note that we can do this only
   if we didn't gtFoldExpr(copy) */
if (copy->gtOper == oper)
{
addFlags |= tree->gtFlags;
#ifdef DEBUG
/* GTF_NODE_MASK should not be propagated from 'tree' to 'copy' */
addFlags &= ~GTF_NODE_MASK;
#endif
copy->gtFlags |= addFlags;
// Update side effect flags since they may be different from the source side effect flags.
// For example, we may have replaced some locals with constants and made indirections non-throwing.
gtUpdateNodeSideEffects(copy);
}
/* GTF_COLON_COND should be propagated from 'tree' to 'copy' */
copy->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
#if defined(DEBUG)
// Non-node debug flags should be propagated from 'tree' to 'copy'
copy->gtDebugFlags |= (tree->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
/* Make sure to copy back fields that may have been initialized */
copy->CopyRawCosts(tree);
copy->gtRsvdRegs = tree->gtRsvdRegs;
copy->CopyReg(tree);
return copy;
}
//------------------------------------------------------------------------
// gtCloneExprCallHelper: clone a call tree
//
// Notes:
// Do not invoke this method directly, instead call either gtCloneExpr
// or gtCloneCandidateCall, as appropriate.
//
// Arguments:
// tree - the call to clone
// addFlags - GTF_* flags to add to the copied tree nodes
// deepVarNum - lclNum to replace uses of beyond the root, or BAD_VAR_NUM for no replacement
// deepVarVal - If replacing beyond root, replace `deepVarNum` with IntCns `deepVarVal`
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneExprCallHelper(GenTreeCall* tree,
GenTreeFlags addFlags,
unsigned deepVarNum,
int deepVarVal)
{
GenTreeCall* copy = new (this, GT_CALL) GenTreeCall(tree->TypeGet());
if (tree->gtCallThisArg == nullptr)
{
copy->gtCallThisArg = nullptr;
}
else
{
copy->gtCallThisArg =
gtNewCallArgs(gtCloneExpr(tree->gtCallThisArg->GetNode(), addFlags, deepVarNum, deepVarVal));
}
copy->gtCallMoreFlags = tree->gtCallMoreFlags;
copy->gtCallArgs = nullptr;
copy->gtCallLateArgs = nullptr;
copy->gtRetBufArg = nullptr;
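// Clone the early and late argument lists, remembering which cloned use (if any)
// corresponds to the return buffer argument.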
GenTreeCall::Use** argsTail = &copy->gtCallArgs;
for (GenTreeCall::Use& use : tree->Args())
{
GenTree* argNode = use.GetNode();
GenTree* copyArgNode = gtCloneExpr(argNode, addFlags, deepVarNum, deepVarVal);
*argsTail = gtNewCallArgs(copyArgNode);
if (tree->gtRetBufArg == &use)
{
// Set the return buffer arg, if any.
assert(copy->gtRetBufArg == nullptr);
copy->gtRetBufArg = *argsTail;
}
argsTail = &((*argsTail)->NextRef());
}
argsTail = &copy->gtCallLateArgs;
for (GenTreeCall::Use& use : tree->LateArgs())
{
GenTree* argNode = use.GetNode();
GenTree* copyArgNode = gtCloneExpr(argNode, addFlags, deepVarNum, deepVarVal);
*argsTail = gtNewCallArgs(copyArgNode);
if (tree->gtRetBufArg == &use)
{
// Set the return buffer arg, if any.
assert(copy->gtRetBufArg == nullptr);
copy->gtRetBufArg = *argsTail;
}
argsTail = &((*argsTail)->NextRef());
}
// Either there was no return buffer for "tree", or else we successfully set the
// return buffer in the copy.
assert((tree->gtRetBufArg == nullptr) || (copy->gtRetBufArg != nullptr));
// The call sig comes from the EE and doesn't change throughout the compilation process, meaning
// we only really need one physical copy of it. Therefore a shallow pointer copy will suffice.
// (Note that this still holds even if the tree we are cloning was created by an inlinee compiler,
// because the inlinee still uses the inliner's memory allocator anyway.)
INDEBUG(copy->callSig = tree->callSig;)
// The tail call info does not change after it is allocated, so for the same reasons as above
// a shallow copy suffices.
copy->tailCallInfo = tree->tailCallInfo;
copy->gtRetClsHnd = tree->gtRetClsHnd;
copy->gtControlExpr = gtCloneExpr(tree->gtControlExpr, addFlags, deepVarNum, deepVarVal);
copy->gtStubCallStubAddr = tree->gtStubCallStubAddr;
/* Copy the union */
if (tree->gtCallType == CT_INDIRECT)
{
copy->gtCallCookie =
tree->gtCallCookie ? gtCloneExpr(tree->gtCallCookie, addFlags, deepVarNum, deepVarVal) : nullptr;
copy->gtCallAddr = tree->gtCallAddr ? gtCloneExpr(tree->gtCallAddr, addFlags, deepVarNum, deepVarVal) : nullptr;
}
else
{
copy->gtCallMethHnd = tree->gtCallMethHnd;
copy->gtInlineCandidateInfo = tree->gtInlineCandidateInfo;
}
copy->gtCallType = tree->gtCallType;
copy->gtReturnType = tree->gtReturnType;
if (tree->fgArgInfo)
{
// Create and initialize the fgArgInfo for our copy of the call tree
copy->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(copy, tree);
}
else
{
copy->fgArgInfo = nullptr;
}
#if FEATURE_MULTIREG_RET
copy->gtReturnTypeDesc = tree->gtReturnTypeDesc;
#endif
#ifdef FEATURE_READYTORUN
copy->setEntryPoint(tree->gtEntryPoint);
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
copy->gtInlineObservation = tree->gtInlineObservation;
copy->gtRawILOffset = tree->gtRawILOffset;
copy->gtInlineContext = tree->gtInlineContext;
#endif
copy->CopyOtherRegFlags(tree);
// We keep track of the number of no return calls, so if we've cloned
// one of these, update the tracking.
//
if (tree->IsNoReturn())
{
assert(copy->IsNoReturn());
setMethodHasNoReturnCalls();
}
return copy;
}
//------------------------------------------------------------------------
// gtCloneCandidateCall: clone a call that is an inline or guarded
// devirtualization candidate (~ any call that can have a GT_RET_EXPR)
//
// Notes:
// If the call really is a candidate, the caller must take additional steps
// after cloning to re-establish candidate info and the relationship between
// the candidate and any associated GT_RET_EXPR.
//
// Arguments:
// call - the call to clone
//
// Returns:
// Cloned copy of call and all subtrees.
GenTreeCall* Compiler::gtCloneCandidateCall(GenTreeCall* call)
{
assert(call->IsInlineCandidate() || call->IsGuardedDevirtualizationCandidate());
GenTreeCall* result = gtCloneExprCallHelper(call);
// There is some common post-processing in gtCloneExpr that we reproduce
// here, for the fields that make sense for candidate calls.
result->gtFlags |= call->gtFlags;
#if defined(DEBUG)
result->gtDebugFlags |= (call->gtDebugFlags & ~GTF_DEBUG_NODE_MASK);
#endif
result->CopyReg(call);
return result;
}
//------------------------------------------------------------------------
// gtUpdateSideEffects: Update the side effects of a tree and its ancestors
//
// Arguments:
// stmt - The tree's statement
// tree - Tree to update the side effects for
//
// Note: If the tree's order hasn't been established, the method updates the side effect
//       flags on all of the statement's nodes.
void Compiler::gtUpdateSideEffects(Statement* stmt, GenTree* tree)
{
if (fgStmtListThreaded)
{
gtUpdateTreeAncestorsSideEffects(tree);
}
else
{
gtUpdateStmtSideEffects(stmt);
}
}
//------------------------------------------------------------------------
// gtUpdateTreeAncestorsSideEffects: Update the side effects of a tree and its ancestors
// when statement order has been established.
//
// Arguments:
// tree - Tree to update the side effects for
//
void Compiler::gtUpdateTreeAncestorsSideEffects(GenTree* tree)
{
assert(fgStmtListThreaded);
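// Walk up the parent chain from 'tree', refreshing the side effect flags at each ancestor.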
while (tree != nullptr)
{
gtUpdateNodeSideEffects(tree);
tree = tree->gtGetParent(nullptr);
}
}
//------------------------------------------------------------------------
// gtUpdateStmtSideEffects: Update the side effects for statement tree nodes.
//
// Arguments:
// stmt - The statement to update side effects on
//
void Compiler::gtUpdateStmtSideEffects(Statement* stmt)
{
fgWalkTree(stmt->GetRootNodePointer(), fgUpdateSideEffectsPre, fgUpdateSideEffectsPost);
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffects: Update the side effects based on the node operation.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
// The caller of this method is expected to update the flags based on the children's flags.
//
void Compiler::gtUpdateNodeOperSideEffects(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
tree->gtFlags &= ~GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
tree->gtFlags &= ~GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeOperSideEffectsPost: Update the side effects based on the node operation,
// in the post-order visit of a tree walk. It is expected that the pre-order visit cleared
// the bits, so the post-order visit only sets them. This is important for binary nodes
// where one child already may have set the GTF_EXCEPT bit. Note that `SetIndirExceptionFlags`
// looks at its child, which is why we need to do this in a bottom-up walk.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_ASG, GTF_CALL, and GTF_EXCEPT flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeOperSideEffectsPost(GenTree* tree)
{
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
}
//------------------------------------------------------------------------
// gtUpdateNodeSideEffects: Update the side effects based on the node operation and
//                          children's side effects.
//
// Arguments:
// tree - Tree to update the side effects on
//
// Notes:
// This method currently only updates GTF_EXCEPT, GTF_ASG, and GTF_CALL flags.
// The other side effect flags may remain unnecessarily (conservatively) set.
//
void Compiler::gtUpdateNodeSideEffects(GenTree* tree)
{
gtUpdateNodeOperSideEffects(tree);
tree->VisitOperands([tree](GenTree* operand) -> GenTree::VisitResult {
tree->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
return GenTree::VisitResult::Continue;
});
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPre: Update the side effects based on the tree operation.
// The pre-visit walk clears GTF_ASG, GTF_CALL, and GTF_EXCEPT; the post-visit walk sets
// the bits as necessary.
//
// Arguments:
// pTree - Pointer to the tree to update the side effects
// fgWalkPre - Walk data
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPre(GenTree** pTree, fgWalkData* fgWalkPre)
{
GenTree* tree = *pTree;
tree->gtFlags &= ~(GTF_ASG | GTF_CALL | GTF_EXCEPT);
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// fgUpdateSideEffectsPost: Update the side effects of the node and parent based on the tree's flags.
//
// Arguments:
// pTree - Pointer to the tree
// fgWalkPost - Walk data
//
// Notes:
// The routine is used for updating the stale side effect flags for ancestor
// nodes starting from treeParent up to the top-level stmt expr.
//
Compiler::fgWalkResult Compiler::fgUpdateSideEffectsPost(GenTree** pTree, fgWalkData* fgWalkPost)
{
GenTree* tree = *pTree;
// Update the node's side effects first.
fgWalkPost->compiler->gtUpdateNodeOperSideEffectsPost(tree);
// If this node is an indir or array length, and it doesn't have the GTF_EXCEPT bit set, we
// set the GTF_IND_NONFAULTING bit. This needs to be done after all children, and this node, have
// been processed.
if (tree->OperIsIndirOrArrLength() && ((tree->gtFlags & GTF_EXCEPT) == 0))
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
// Then update the parent's side effects based on this node.
GenTree* parent = fgWalkPost->parent;
if (parent != nullptr)
{
parent->gtFlags |= (tree->gtFlags & GTF_ALL_EFFECT);
}
return WALK_CONTINUE;
}
//------------------------------------------------------------------------
// gtGetThisArg: Return this pointer node for the call.
//
// Arguments:
// call - the call node with a this argument.
//
// Return value:
// the this pointer node.
//
GenTree* Compiler::gtGetThisArg(GenTreeCall* call)
{
assert(call->gtCallThisArg != nullptr);
GenTree* thisArg = call->gtCallThisArg->GetNode();
if (!thisArg->OperIs(GT_ASG))
{
if ((thisArg->gtFlags & GTF_LATE_ARG) == 0)
{
return thisArg;
}
}
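// The 'this' argument is either a late arg or its entry in gtCallThisArg is a setup
// assignment; retrieve the actual node via the arg table entry for argument number 0.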
assert(call->gtCallLateArgs != nullptr);
unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, argNum);
GenTree* result = thisArgTabEntry->GetNode();
// Assert if we used DEBUG_DESTROY_NODE.
assert(result->gtOper != GT_COUNT);
return result;
}
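//------------------------------------------------------------------------
// gtSetFlags: Check whether codegen for this node is expected to set the condition flags.
//
// Return Value:
//    true if the node has GTF_SET_FLAGS set (and is not a GT_IND); false otherwise.
//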
bool GenTree::gtSetFlags() const
{
//
// When FEATURE_SET_FLAGS (TARGET_ARM) is active, the method returns true
//    when gtFlags has the GTF_SET_FLAGS flag set;
// otherwise the architecture has instructions that typically set
//    the flags, and this method will return true.
//
// Exceptions: GT_IND (load/store) is not allowed to set the flags
// and on XARCH the GT_MUL/GT_DIV and all overflow instructions
// do not set the condition flags
//
// Precondition: we have a GTK_SMPOP
//
if (!varTypeIsIntegralOrI(TypeGet()) && (TypeGet() != TYP_VOID))
{
return false;
}
if (((gtFlags & GTF_SET_FLAGS) != 0) && (gtOper != GT_IND))
{
// GTF_SET_FLAGS is not valid on GT_IND and is overlaid with GTF_IND_NONFAULTING
return true;
}
else
{
return false;
}
}
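//------------------------------------------------------------------------
// gtRequestSetFlags: Try to mark this node with GTF_SET_FLAGS so that codegen sets the condition flags.
//
// Return Value:
//    true if GTF_SET_FLAGS was set on the node (codegen must then set the condition flags);
//    false if this node cannot set the flags, or if FEATURE_SET_FLAGS is not defined.
//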
bool GenTree::gtRequestSetFlags()
{
bool result = false;
#if FEATURE_SET_FLAGS
// This method is a Nop unless FEATURE_SET_FLAGS is defined
// In order to set GTF_SET_FLAGS
// we must have a GTK_SMPOP
// and we have an integer or machine size type (not floating point or TYP_LONG on 32-bit)
//
if (!OperIsSimple())
return false;
if (!varTypeIsIntegralOrI(TypeGet()))
return false;
switch (gtOper)
{
case GT_IND:
case GT_ARR_LENGTH:
// These will turn into simple load from memory instructions
// and we can't force the setting of the flags on load from memory
break;
case GT_MUL:
case GT_DIV:
// These instructions don't set the flags (on x86/x64)
//
break;
default:
// Otherwise we can set the flags for this gtOper
// and codegen must set the condition flags.
//
gtFlags |= GTF_SET_FLAGS;
result = true;
break;
}
#endif // FEATURE_SET_FLAGS
// Codegen for this tree must set the condition flags if
// this method returns true.
//
return result;
}
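//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: Construct an iterator in the terminal ("end") state.
//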
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator()
: m_advance(nullptr), m_node(nullptr), m_edge(nullptr), m_statePtr(nullptr), m_state(-1)
{
}
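//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: Construct an iterator positioned at the first use edge of the given node.
//
// Arguments:
//    node - The node whose use edges are to be enumerated; must not be null.
//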
GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
: m_advance(nullptr), m_node(node), m_edge(nullptr), m_statePtr(nullptr), m_state(0)
{
assert(m_node != nullptr);
// NOTE: the switch statement below must be updated when introducing new nodes.
switch (m_node->OperGet())
{
// Leaf nodes
case GT_LCL_VAR:
case GT_LCL_FLD:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_CATCH_ARG:
case GT_LABEL:
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_MEMORYBARRIER:
case GT_JMP:
case GT_JCC:
case GT_SETCC:
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
case GT_PHI_ARG:
case GT_JMPTABLE:
case GT_CLS_VAR:
case GT_CLS_VAR_ADDR:
case GT_ARGPLACE:
case GT_PHYSREG:
case GT_EMITNOP:
case GT_PINVOKE_PROLOG:
case GT_PINVOKE_EPILOG:
case GT_IL_OFFSET:
m_state = -1;
return;
// Standard unary operators
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
case GT_NOT:
case GT_NEG:
case GT_COPY:
case GT_RELOAD:
case GT_ARR_LENGTH:
case GT_CAST:
case GT_BITCAST:
case GT_CKFINITE:
case GT_LCLHEAP:
case GT_ADDR:
case GT_IND:
case GT_OBJ:
case GT_BLK:
case GT_BOX:
case GT_ALLOCOBJ:
case GT_RUNTIMELOOKUP:
case GT_INIT_VAL:
case GT_JTRUE:
case GT_SWITCH:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
case GT_INC_SATURATE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
case GT_RETURNTRAP:
m_edge = &m_node->AsUnOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
return;
// Unary operators with an optional operand
case GT_NOP:
case GT_FIELD:
case GT_RETURN:
case GT_RETFILT:
if (m_node->AsUnOp()->gtOp1 == nullptr)
{
assert(m_node->NullOp1Legal());
m_state = -1;
}
else
{
m_edge = &m_node->AsUnOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
return;
// Variadic nodes
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
SetEntryStateForMultiOp();
return;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
// LEA, which may have no first operand
case GT_LEA:
if (m_node->AsAddrMode()->gtOp1 == nullptr)
{
m_edge = &m_node->AsAddrMode()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else
{
SetEntryStateForBinOp();
}
return;
// Special nodes
case GT_FIELD_LIST:
m_statePtr = m_node->AsFieldList()->Uses().GetHead();
m_advance = &GenTreeUseEdgeIterator::AdvanceFieldList;
AdvanceFieldList();
return;
case GT_PHI:
m_statePtr = m_node->AsPhi()->gtUses;
m_advance = &GenTreeUseEdgeIterator::AdvancePhi;
AdvancePhi();
return;
case GT_CMPXCHG:
m_edge = &m_node->AsCmpXchg()->gtOpLocation;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceCmpXchg;
return;
case GT_ARR_ELEM:
m_edge = &m_node->AsArrElem()->gtArrObj;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrElem;
return;
case GT_ARR_OFFSET:
m_edge = &m_node->AsArrOffs()->gtOffset;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceArrOffset;
return;
case GT_STORE_DYN_BLK:
m_edge = &m_node->AsStoreDynBlk()->Addr();
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::AdvanceStoreDynBlk;
return;
case GT_CALL:
AdvanceCall<CALL_INSTANCE>();
return;
// Binary nodes
default:
assert(m_node->OperIsBinary());
SetEntryStateForBinOp();
return;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCmpXchg: produces the next operand of a CmpXchg node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceCmpXchg()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsCmpXchg()->gtOpValue;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsCmpXchg()->gtOpComparand;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrElem: produces the next operand of a ArrElem node and advances the state.
//
// Because these nodes are variadic, this function uses `m_state` to index into the list of array indices.
//
void GenTreeUseEdgeIterator::AdvanceArrElem()
{
if (m_state < m_node->AsArrElem()->gtArrRank)
{
m_edge = &m_node->AsArrElem()->gtArrInds[m_state];
assert(*m_edge != nullptr);
m_state++;
}
else
{
m_state = -1;
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceArrOffset: produces the next operand of a ArrOffset node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceArrOffset()
{
switch (m_state)
{
case 0:
m_edge = &m_node->AsArrOffs()->gtIndex;
m_state = 1;
break;
case 1:
m_edge = &m_node->AsArrOffs()->gtArrObj;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceStoreDynBlk: produces the next operand of a StoreDynBlk node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceStoreDynBlk()
{
GenTreeStoreDynBlk* const dynBlock = m_node->AsStoreDynBlk();
switch (m_state)
{
case 0:
m_edge = &dynBlock->Data();
m_state = 1;
break;
case 1:
m_edge = &dynBlock->gtDynamicSize;
m_advance = &GenTreeUseEdgeIterator::Terminate;
break;
default:
unreached();
}
assert(*m_edge != nullptr);
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceFieldList: produces the next operand of a FieldList node and advances the state.
//
void GenTreeUseEdgeIterator::AdvanceFieldList()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreeFieldList::Use* currentUse = static_cast<GenTreeFieldList::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvancePhi: produces the next operand of a Phi node and advances the state.
//
void GenTreeUseEdgeIterator::AdvancePhi()
{
assert(m_state == 0);
if (m_statePtr == nullptr)
{
m_state = -1;
}
else
{
GenTreePhi::Use* currentUse = static_cast<GenTreePhi::Use*>(m_statePtr);
m_edge = ¤tUse->NodeRef();
m_statePtr = currentUse->GetNext();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceBinOp: produces the next operand of a binary node and advances the state.
//
// This function must be instantiated s.t. `ReverseOperands` is `true` iff the node is marked with the
// `GTF_REVERSE_OPS` flag.
//
template <bool ReverseOperands>
void GenTreeUseEdgeIterator::AdvanceBinOp()
{
assert(ReverseOperands == ((m_node->gtFlags & GTF_REVERSE_OPS) != 0));
m_edge = !ReverseOperands ? &m_node->AsOp()->gtOp2 : &m_node->AsOp()->gtOp1;
assert(*m_edge != nullptr);
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForBinOp: produces the first operand of a binary node and chooses
// the appropriate advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForBinOp()
{
assert(m_node != nullptr);
assert(m_node->OperIsBinary());
GenTreeOp* const node = m_node->AsOp();
if (node->gtOp2 == nullptr)
{
assert(node->gtOp1 != nullptr);
assert(node->NullOp2Legal());
m_edge = &node->gtOp1;
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
else if ((node->gtFlags & GTF_REVERSE_OPS) != 0)
{
m_edge = &m_node->AsOp()->gtOp2;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<true>;
}
else
{
m_edge = &m_node->AsOp()->gtOp1;
m_advance = &GenTreeUseEdgeIterator::AdvanceBinOp<false>;
}
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceMultiOp: produces the next operand of a multi-op node and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// incrementing the "m_edge" pointer, unless the end, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
m_edge++;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceReversedMultiOp: produces the next operand of a multi-op node
// marked with GTF_REVERSE_OPS and advances the state.
//
// Takes advantage of the fact that GenTreeMultiOp stores the operands in a contiguous array, simply
// decrementing the "m_edge" pointer, unless the beginning, stored in "m_statePtr", has been reached.
//
void GenTreeUseEdgeIterator::AdvanceReversedMultiOp()
{
assert(m_node != nullptr);
assert(m_node->OperIs(GT_SIMD, GT_HWINTRINSIC));
assert((m_node->AsMultiOp()->GetOperandCount() == 2) && m_node->IsReverseOp());
m_edge--;
if (m_edge == m_statePtr)
{
Terminate();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::SetEntryStateForMultiOp: produces the first operand of a multi-op node and sets the
// required advance function.
//
void GenTreeUseEdgeIterator::SetEntryStateForMultiOp()
{
size_t operandCount = m_node->AsMultiOp()->GetOperandCount();
if (operandCount == 0)
{
Terminate();
}
else
{
if (m_node->IsReverseOp())
{
assert(operandCount == 2);
m_edge = m_node->AsMultiOp()->GetOperandArray() + 1;
m_statePtr = m_node->AsMultiOp()->GetOperandArray() - 1;
m_advance = &GenTreeUseEdgeIterator::AdvanceReversedMultiOp;
}
else
{
m_edge = m_node->AsMultiOp()->GetOperandArray();
m_statePtr = m_node->AsMultiOp()->GetOperandArray(operandCount);
m_advance = &GenTreeUseEdgeIterator::AdvanceMultiOp;
}
}
}
#endif
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::AdvanceCall: produces the next operand of a call node and advances the state.
//
// This function is a bit tricky: in order to avoid doing unnecessary work, it is instantiated with the
// state number the iterator will be in when it is called. For example, `AdvanceCall<CALL_INSTANCE>`
// is the instantiation used when the iterator is at the `CALL_INSTANCE` state (i.e. the entry state).
// This sort of templating allows each state to avoid processing earlier states without unnecessary
// duplication of code.
//
// Note that this method expands the argument lists (`gtCallArgs` and `gtCallLateArgs`) into their
// component operands.
//
template <int state>
void GenTreeUseEdgeIterator::AdvanceCall()
{
GenTreeCall* const call = m_node->AsCall();
switch (state)
{
case CALL_INSTANCE:
m_statePtr = call->gtCallArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ARGS>;
if (call->gtCallThisArg != nullptr)
{
m_edge = &call->gtCallThisArg->NodeRef();
return;
}
FALLTHROUGH;
case CALL_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_statePtr = call->gtCallLateArgs;
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_LATE_ARGS>;
FALLTHROUGH;
case CALL_LATE_ARGS:
if (m_statePtr != nullptr)
{
GenTreeCall::Use* use = static_cast<GenTreeCall::Use*>(m_statePtr);
m_edge = &use->NodeRef();
m_statePtr = use->GetNext();
return;
}
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_CONTROL_EXPR>;
FALLTHROUGH;
case CALL_CONTROL_EXPR:
if (call->gtControlExpr != nullptr)
{
if (call->gtCallType == CT_INDIRECT)
{
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_COOKIE>;
}
else
{
m_advance = &GenTreeUseEdgeIterator::Terminate;
}
m_edge = &call->gtControlExpr;
return;
}
else if (call->gtCallType != CT_INDIRECT)
{
m_state = -1;
return;
}
FALLTHROUGH;
case CALL_COOKIE:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::AdvanceCall<CALL_ADDRESS>;
if (call->gtCallCookie != nullptr)
{
m_edge = &call->gtCallCookie;
return;
}
FALLTHROUGH;
case CALL_ADDRESS:
assert(call->gtCallType == CT_INDIRECT);
m_advance = &GenTreeUseEdgeIterator::Terminate;
if (call->gtCallAddr != nullptr)
{
m_edge = &call->gtCallAddr;
}
return;
default:
unreached();
}
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::Terminate: advances the iterator to the terminal state.
//
void GenTreeUseEdgeIterator::Terminate()
{
m_state = -1;
}
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator::operator++: advances the iterator to the next operand.
//
GenTreeUseEdgeIterator& GenTreeUseEdgeIterator::operator++()
{
// If we've reached the terminal state, do nothing.
if (m_state != -1)
{
(this->*m_advance)();
}
return *this;
}
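//------------------------------------------------------------------------
// UseEdgesBegin/UseEdgesEnd/UseEdges and OperandsBegin/OperandsEnd/Operands:
// Standard begin/end accessors and iterator pairs over a node's use edges and operands.
//
// An illustrative (not prescriptive) usage sketch, for a hypothetical node "node":
//
//    for (GenTree* operand : node->Operands())
//    {
//        // ... inspect each operand in use order ...
//    }
//
// The UseEdges() form is similar, but each iteration position refers to the operand slot
// ("use edge") itself, which allows the operand to be replaced in place.
//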
GenTreeUseEdgeIterator GenTree::UseEdgesBegin()
{
return GenTreeUseEdgeIterator(this);
}
GenTreeUseEdgeIterator GenTree::UseEdgesEnd()
{
return GenTreeUseEdgeIterator();
}
IteratorPair<GenTreeUseEdgeIterator> GenTree::UseEdges()
{
return MakeIteratorPair(UseEdgesBegin(), UseEdgesEnd());
}
GenTreeOperandIterator GenTree::OperandsBegin()
{
return GenTreeOperandIterator(this);
}
GenTreeOperandIterator GenTree::OperandsEnd()
{
return GenTreeOperandIterator();
}
IteratorPair<GenTreeOperandIterator> GenTree::Operands()
{
return MakeIteratorPair(OperandsBegin(), OperandsEnd());
}
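//------------------------------------------------------------------------
// Precedes: Check whether this node comes before another node in execution order.
//
// Arguments:
//    other - The node to compare against; must not be null.
//
// Return Value:
//    true if "other" is reachable from this node via gtNext links; false otherwise.
//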
bool GenTree::Precedes(GenTree* other)
{
assert(other != nullptr);
for (GenTree* node = gtNext; node != nullptr; node = node->gtNext)
{
if (node == other)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------------
// SetIndirExceptionFlags : Set GTF_EXCEPT and GTF_IND_NONFAULTING flags as appropriate
// on an indirection or an array length node.
//
// Arguments:
// comp - compiler instance
//
void GenTree::SetIndirExceptionFlags(Compiler* comp)
{
assert(OperIsIndirOrArrLength());
if (OperMayThrow(comp))
{
gtFlags |= GTF_EXCEPT;
return;
}
GenTree* addr = nullptr;
if (OperIsIndir())
{
addr = AsIndir()->Addr();
}
else
{
assert(gtOper == GT_ARR_LENGTH);
addr = AsArrLen()->ArrRef();
}
if ((addr->gtFlags & GTF_EXCEPT) != 0)
{
gtFlags |= GTF_EXCEPT;
}
else
{
gtFlags &= ~GTF_EXCEPT;
gtFlags |= GTF_IND_NONFAULTING;
}
}
#ifdef DEBUG
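//------------------------------------------------------------------------
// gtDispFlags: Print a one-character-per-flag summary of the given node flags.
//
// Arguments:
//    flags      - The GenTreeFlags to display
//    debugFlags - The GenTreeDebugFlags to display
//
// Return Value:
//    The number of flag characters printed.
//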
/* static */ int GenTree::gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags)
{
int charsDisplayed = 11; // 11 is the "baseline" number of flag characters displayed
printf("%c", (flags & GTF_ASG) ? 'A' : (IsContained(flags) ? 'c' : '-'));
printf("%c", (flags & GTF_CALL) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF) ? 'G' : '-');
printf("%c", (debugFlags & GTF_DEBUG_NODE_MORPHED) ? '+' : // First print '+' if GTF_DEBUG_NODE_MORPHED is set
(flags & GTF_ORDER_SIDEEFF) ? 'O' : '-'); // otherwise print 'O' or '-'
printf("%c", (flags & GTF_COLON_COND) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED) ? 'U' : (flags & GTF_BOOLEAN) ? 'B' : '-');
#if FEATURE_SET_FLAGS
printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-');
++charsDisplayed;
#endif
printf("%c", (flags & GTF_LATE_ARG) ? 'L' : '-');
printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 'Z' : '-');
return charsDisplayed;
}
#ifdef TARGET_X86
inline const char* GetCallConvName(CorInfoCallConvExtension callConv)
{
switch (callConv)
{
case CorInfoCallConvExtension::Managed:
return "Managed";
case CorInfoCallConvExtension::C:
return "C";
case CorInfoCallConvExtension::Stdcall:
return "Stdcall";
case CorInfoCallConvExtension::Thiscall:
return "Thiscall";
case CorInfoCallConvExtension::Fastcall:
return "Fastcall";
case CorInfoCallConvExtension::CMemberFunction:
return "CMemberFunction";
case CorInfoCallConvExtension::StdcallMemberFunction:
return "StdcallMemberFunction";
case CorInfoCallConvExtension::FastcallMemberFunction:
return "FastcallMemberFunction";
default:
return "UnknownCallConv";
}
}
#endif // TARGET_X86
/*****************************************************************************/
void Compiler::gtDispNodeName(GenTree* tree)
{
/* print the node name */
const char* name;
assert(tree);
if (tree->gtOper < GT_COUNT)
{
name = GenTree::OpName(tree->OperGet());
}
else
{
name = "<ERROR>";
}
char buf[32];
char* bufp = &buf[0];
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
sprintf_s(bufp, sizeof(buf), " %s(h)%c", name, 0);
}
else if (tree->gtOper == GT_PUTARG_STK)
{
sprintf_s(bufp, sizeof(buf), " %s [+0x%02x]%c", name, tree->AsPutArgStk()->getArgOffset(), 0);
}
else if (tree->gtOper == GT_CALL)
{
const char* callType = "CALL";
const char* gtfType = "";
const char* ctType = "";
char gtfTypeBuf[100];
if (tree->AsCall()->gtCallType == CT_USER_FUNC)
{
if (tree->AsCall()->IsVirtual())
{
callType = "CALLV";
}
}
else if (tree->AsCall()->gtCallType == CT_HELPER)
{
ctType = " help";
}
else if (tree->AsCall()->gtCallType == CT_INDIRECT)
{
ctType = " ind";
}
else
{
assert(!"Unknown gtCallType");
}
if (tree->gtFlags & GTF_CALL_NULLCHECK)
{
gtfType = " nullcheck";
}
if (tree->AsCall()->IsVirtualVtable())
{
gtfType = " vt-ind";
}
else if (tree->AsCall()->IsVirtualStub())
{
gtfType = " stub";
}
#ifdef FEATURE_READYTORUN
else if (tree->AsCall()->IsR2RRelativeIndir())
{
gtfType = " r2r_ind";
}
#endif // FEATURE_READYTORUN
else if (tree->gtFlags & GTF_CALL_UNMANAGED)
{
char* gtfTypeBufWalk = gtfTypeBuf;
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " unman");
if (tree->gtFlags & GTF_CALL_POP_ARGS)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " popargs");
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " thiscall");
}
#ifdef TARGET_X86
gtfTypeBufWalk += SimpleSprintf_s(gtfTypeBufWalk, gtfTypeBuf, sizeof(gtfTypeBuf), " %s",
GetCallConvName(tree->AsCall()->GetUnmanagedCallConv()));
#endif // TARGET_X86
gtfType = gtfTypeBuf;
}
sprintf_s(bufp, sizeof(buf), " %s%s%s%c", callType, ctType, gtfType, 0);
}
else if (tree->gtOper == GT_ARR_ELEM)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
for (unsigned rank = tree->AsArrElem()->gtArrRank - 1; rank; rank--)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_ARR_OFFSET || tree->gtOper == GT_ARR_INDEX)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s[", name);
unsigned char currDim;
unsigned char rank;
if (tree->gtOper == GT_ARR_OFFSET)
{
currDim = tree->AsArrOffs()->gtCurrDim;
rank = tree->AsArrOffs()->gtArrRank;
}
else
{
currDim = tree->AsArrIndex()->gtCurrDim;
rank = tree->AsArrIndex()->gtArrRank;
}
for (unsigned char dim = 0; dim < rank; dim++)
{
// Use a de facto standard i,j,k for the dimensions.
// Note that we only support up to rank 3 arrays with these nodes, so we won't run out of characters.
char dimChar = '*';
if (dim == currDim)
{
dimChar = 'i' + dim;
}
else if (dim > currDim)
{
dimChar = ' ';
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%c", dimChar);
if (dim != rank - 1)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), ",");
}
}
SimpleSprintf_s(bufp, buf, sizeof(buf), "]");
}
else if (tree->gtOper == GT_LEA)
{
GenTreeAddrMode* lea = tree->AsAddrMode();
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name);
if (lea->Base() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+");
}
if (lea->Index() != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale);
}
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "%d)", lea->Offset());
}
else if (tree->gtOper == GT_BOUNDS_CHECK)
{
switch (tree->AsBoundsChk()->gtThrowKind)
{
case SCK_RNGCHK_FAIL:
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s_Rng", name);
if (tree->AsBoundsChk()->gtIndRngFailBB != nullptr)
{
bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " -> " FMT_BB,
tree->AsBoundsChk()->gtIndRngFailBB->bbNum);
}
break;
}
case SCK_ARG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_Arg", name);
break;
case SCK_ARG_RNG_EXCPN:
sprintf_s(bufp, sizeof(buf), " %s_ArgRng", name);
break;
default:
unreached();
}
}
else if (tree->gtOverflowEx())
{
sprintf_s(bufp, sizeof(buf), " %s_ovfl%c", name, 0);
}
else
{
sprintf_s(bufp, sizeof(buf), " %s%c", name, 0);
}
if (strlen(buf) < 10)
{
printf(" %-10s", buf);
}
else
{
printf(" %s", buf);
}
}
//------------------------------------------------------------------------
// gtDispZeroFieldSeq: If this node has a zero-offset field sequence annotation,
//                     then print that field sequence.
//
void Compiler::gtDispZeroFieldSeq(GenTree* tree)
{
NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
// The most common case is having no entries in this map
if (map->GetCount() > 0)
{
FieldSeqNode* fldSeq = nullptr;
if (map->Lookup(tree, &fldSeq))
{
printf(" Zero");
gtDispAnyFieldSeq(fldSeq);
}
}
}
//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
{
if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
{
assert(tree->gtVNPair.GetConservative() != ValueNumStore::NoVN);
printf(" ");
vnpPrint(tree->gtVNPair, 0);
}
}
//------------------------------------------------------------------------
// gtDispCommonEndLine
// Utility function that prints the following node information:
//    1. The associated zero-offset field sequence (if any)
//    2. The register assigned to this node (if any)
//    3. The value number assigned (if any)
//    4. A newline character
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
}
//------------------------------------------------------------------------
// gtDispNode: Print a tree to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// msg - a contextual message (i.e. from the parent) to print
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' may be null
void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_z_ const char* msg, bool isLIR)
{
bool printFlags = true; // always true..
int msgLength = 25;
GenTree* prev;
if (tree->gtSeqNum)
{
printf("N%03u ", tree->gtSeqNum);
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
printf("(???"
",???"
") "); // This probably indicates a bug: the node has a sequence number, but not costs.
}
}
else
{
prev = tree;
bool hasSeqNum = true;
unsigned dotNum = 0;
do
{
dotNum++;
prev = prev->gtPrev;
if ((prev == nullptr) || (prev == tree))
{
hasSeqNum = false;
break;
}
assert(prev);
} while (prev->gtSeqNum == 0);
// If we have an indent stack, don't add additional characters,
// as it will mess up the alignment.
bool displayDotNum = hasSeqNum && (indentStack == nullptr);
if (displayDotNum)
{
printf("N%03u.%02u ", prev->gtSeqNum, dotNum);
}
else
{
printf(" ");
}
if (tree->gtCostsInitialized)
{
printf("(%3u,%3u) ", tree->GetCostEx(), tree->GetCostSz());
}
else
{
if (displayDotNum)
{
// Do better alignment in this case
printf(" ");
}
else
{
printf(" ");
}
}
}
if (optValnumCSE_phase)
{
if (IS_CSE_INDEX(tree->gtCSEnum))
{
printf(FMT_CSE " (%s)", GET_CSE_INDEX(tree->gtCSEnum), (IS_CSE_USE(tree->gtCSEnum) ? "use" : "def"));
}
else
{
printf(" ");
}
}
/* Print the node ID */
printTreeID(tree);
printf(" ");
if (tree->gtOper >= GT_COUNT)
{
printf(" **** ILLEGAL NODE ****");
return;
}
if (printFlags)
{
/* First print the flags associated with the node */
switch (tree->gtOper)
{
case GT_LEA:
case GT_BLK:
case GT_OBJ:
case GT_STORE_BLK:
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_IND:
// We prefer printing V or U
if ((tree->gtFlags & (GTF_IND_VOLATILE | GTF_IND_UNALIGNED)) == 0)
{
if (tree->gtFlags & GTF_IND_TGTANYWHERE)
{
printf("*");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_TGT_NOT_HEAP)
{
printf("s");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_INVARIANT)
{
printf("#");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ARR_INDEX)
{
printf("a");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
printf("n"); // print a n for non-faulting
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_ASG_LHS)
{
printf("D"); // print a D for definition
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_NONNULL)
{
printf("@");
--msgLength;
break;
}
}
FALLTHROUGH;
case GT_INDEX:
case GT_INDEX_ADDR:
case GT_FIELD:
case GT_CLS_VAR:
if (tree->gtFlags & GTF_IND_VOLATILE)
{
printf("V");
--msgLength;
break;
}
if (tree->gtFlags & GTF_IND_UNALIGNED)
{
printf("U");
--msgLength;
break;
}
goto DASH;
case GT_ASG:
if (tree->OperIsInitBlkOp())
{
printf("I");
--msgLength;
break;
}
goto DASH;
case GT_CALL:
if (tree->AsCall()->IsInlineCandidate())
{
if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("&");
}
else
{
printf("I");
}
--msgLength;
break;
}
else if (tree->AsCall()->IsGuardedDevirtualizationCandidate())
{
printf("G");
--msgLength;
break;
}
if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_RETBUFFARG)
{
printf("S");
--msgLength;
break;
}
if (tree->gtFlags & GTF_CALL_HOISTABLE)
{
printf("H");
--msgLength;
break;
}
goto DASH;
case GT_MUL:
#if !defined(TARGET_64BIT)
case GT_MUL_LONG:
#endif
if (tree->gtFlags & GTF_MUL_64RSLT)
{
printf("L");
--msgLength;
break;
}
goto DASH;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (tree->gtFlags & GTF_DIV_BY_CNS_OPT)
{
printf("M"); // We will use a Multiply by reciprical
--msgLength;
break;
}
goto DASH;
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (tree->gtFlags & GTF_VAR_USEASG)
{
printf("U");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_MULTIREG)
{
printf((tree->gtFlags & GTF_VAR_DEF) ? "M" : "m");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_DEF)
{
printf("D");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CAST)
{
printf("C");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_ARR_INDEX)
{
printf("i");
--msgLength;
break;
}
if (tree->gtFlags & GTF_VAR_CONTEXT)
{
printf("!");
--msgLength;
break;
}
goto DASH;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
printf("N");
--msgLength;
break;
}
if (tree->gtFlags & GTF_RELOP_JMP_USED)
{
printf("J");
--msgLength;
break;
}
goto DASH;
case GT_JCMP:
printf((tree->gtFlags & GTF_JCMP_TST) ? "T" : "C");
printf((tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
goto DASH;
case GT_CNS_INT:
if (tree->IsIconHandle())
{
if ((tree->gtFlags & GTF_ICON_INITCLASS) != 0)
{
printf("I"); // Static Field handle with INITCLASS requirement
--msgLength;
break;
}
else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf("O");
--msgLength;
break;
}
else
{
// Some other handle
printf("H");
--msgLength;
break;
}
}
goto DASH;
default:
DASH:
printf("-");
--msgLength;
break;
}
/* Then print the general purpose flags */
GenTreeFlags flags = tree->gtFlags;
if (tree->OperIsBinary() || tree->OperIsMultiOp())
{
genTreeOps oper = tree->OperGet();
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul/shl Binary Operators
if ((oper == GT_ADD) || (oper == GT_MUL) || (oper == GT_LSH))
{
if ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)
{
flags |= GTF_DONT_CSE; // Force the GTF_ADDRMODE_NO_CSE flag to print out like GTF_DONT_CSE
}
}
}
else // !(tree->OperIsBinary() || tree->OperIsMultiOp())
{
// The GTF_REVERSE_OPS flag only applies to binary operations (which some MultiOp nodes are).
flags &= ~GTF_REVERSE_OPS; // we use this value for GTF_VAR_ARR_INDEX above
}
msgLength -= GenTree::gtDispFlags(flags, tree->gtDebugFlags);
/*
printf("%c", (flags & GTF_ASG ) ? 'A' : '-');
printf("%c", (flags & GTF_CALL ) ? 'C' : '-');
printf("%c", (flags & GTF_EXCEPT ) ? 'X' : '-');
printf("%c", (flags & GTF_GLOB_REF ) ? 'G' : '-');
printf("%c", (flags & GTF_ORDER_SIDEEFF ) ? 'O' : '-');
printf("%c", (flags & GTF_COLON_COND ) ? '?' : '-');
printf("%c", (flags & GTF_DONT_CSE ) ? 'N' : // N is for No cse
(flags & GTF_MAKE_CSE ) ? 'H' : '-'); // H is for Hoist this expr
printf("%c", (flags & GTF_REVERSE_OPS ) ? 'R' : '-');
printf("%c", (flags & GTF_UNSIGNED ) ? 'U' :
(flags & GTF_BOOLEAN ) ? 'B' : '-');
printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-');
printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-');
printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-');
*/
}
// If we're printing a node for LIR, we use the space normally associated with the message
// to display the node's temp name (if any)
const bool hasOperands = tree->OperandsBegin() != tree->OperandsEnd();
if (isLIR)
{
assert(msg == nullptr);
// If the tree does not have any operands, we do not display the indent stack. This gives us
// two additional characters for alignment.
if (!hasOperands)
{
msgLength += 1;
}
if (tree->IsValue())
{
const size_t bufLength = msgLength - 1;
msg = reinterpret_cast<char*>(_alloca(bufLength * sizeof(char)));
sprintf_s(const_cast<char*>(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " ");
}
}
/* print the msg associated with the node */
if (msg == nullptr)
{
msg = "";
}
if (msgLength < 0)
{
msgLength = 0;
}
printf(isLIR ? " %+*s" : " %-*s", msgLength, msg);
/* Indent the node accordingly */
if (!isLIR || hasOperands)
{
printIndent(indentStack);
}
gtDispNodeName(tree);
assert(tree == nullptr || tree->gtOper < GT_COUNT);
if (tree)
{
/* print the type of the node */
if (tree->gtOper != GT_CAST)
{
printf(" %-6s", varTypeName(tree->TypeGet()));
if (varTypeIsStruct(tree->TypeGet()))
{
ClassLayout* layout = nullptr;
if (tree->OperIs(GT_BLK, GT_OBJ, GT_STORE_BLK, GT_STORE_OBJ))
{
layout = tree->AsBlk()->GetLayout();
}
else if (tree->OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR))
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVar());
if (varTypeIsStruct(varDsc->TypeGet()))
{
layout = varDsc->GetLayout();
}
}
else if (tree->OperIs(GT_INDEX))
{
GenTreeIndex* asInd = tree->AsIndex();
CORINFO_CLASS_HANDLE clsHnd = asInd->gtStructElemClass;
if (clsHnd != nullptr)
{
// We could create a layout with `typGetObjLayout(asInd->gtStructElemClass)` but we
// don't want to affect the layout table.
const unsigned classSize = info.compCompHnd->getClassSize(clsHnd);
const char16_t* shortClassName = eeGetShortClassName(clsHnd);
printf("<%S, %u>", shortClassName, classSize);
}
}
else if (tree->OperIsIndir())
{
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
if (varTypeIsStruct(arrInfo.m_elemType))
{
CORINFO_CLASS_HANDLE clsHnd = arrInfo.m_elemStructType;
// We could create a layout with `typGetObjLayout(asInd->gtStructElemClass)` but we
// don't want to affect the layout table.
const unsigned classSize = info.compCompHnd->getClassSize(clsHnd);
const char16_t* shortClassName = eeGetShortClassName(clsHnd);
printf("<%S, %u>", shortClassName, classSize);
}
}
}
if (layout != nullptr)
{
gtDispClassLayout(layout, tree->TypeGet());
}
}
if (tree->gtOper == GT_LCL_VAR || tree->gtOper == GT_STORE_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
if (varDsc->IsAddressExposed())
{
printf("(AX)"); // Variable has address exposed.
}
if (varDsc->IsHiddenBufferStructArg())
{
printf("(RB)"); // Variable is hidden return buffer
}
if (varDsc->lvUnusedStruct)
{
assert(varDsc->lvPromoted);
printf("(U)"); // Unused struct
}
else if (varDsc->lvPromoted)
{
if (varTypeIsPromotable(varDsc))
{
printf("(P)"); // Promoted struct
}
else
{
// Promoted implicit by-refs can have this state during
// global morph while they are being rewritten
printf("(P?!)"); // Promoted struct
}
}
}
if (tree->IsArgPlaceHolderNode() && (tree->AsArgPlace()->gtArgPlaceClsHnd != nullptr))
{
printf(" => [clsHnd=%08X]", dspPtr(tree->AsArgPlace()->gtArgPlaceClsHnd));
}
if (tree->gtOper == GT_RUNTIMELOOKUP)
{
#ifdef TARGET_64BIT
printf(" 0x%llx", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#else
printf(" 0x%x", dspPtr(tree->AsRuntimeLookup()->gtHnd));
#endif
switch (tree->AsRuntimeLookup()->gtHndType)
{
case CORINFO_HANDLETYPE_CLASS:
printf(" class");
break;
case CORINFO_HANDLETYPE_METHOD:
printf(" method");
break;
case CORINFO_HANDLETYPE_FIELD:
printf(" field");
break;
default:
printf(" unknown");
break;
}
}
}
// for tracking down problems in reguse prediction or liveness tracking
if (verbose && 0)
{
printf(" RR=");
dspRegMask(tree->gtRsvdRegs);
printf("\n");
}
}
}
#if FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispMultiRegCount: determine how many registers to print for a multi-reg node
//
// Arguments:
// tree - GenTree node whose registers we want to print
//
// Return Value:
// The number of registers to print
//
// Notes:
// This is not the same in all cases as GenTree::GetMultiRegCount().
// In particular, for COPY or RELOAD it only returns the number of *valid* registers,
// and for CALL, it will return 0 if the ReturnTypeDesc hasn't yet been initialized.
// But we want to print all register positions.
//
unsigned Compiler::gtDispMultiRegCount(GenTree* tree)
{
if (tree->IsCopyOrReload())
{
// GetRegCount() will return only the number of valid regs for COPY or RELOAD,
// but we want to print all positions, so we get the reg count for op1.
return gtDispMultiRegCount(tree->gtGetOp1());
}
else if (!tree->IsMultiRegNode())
{
// We can wind up here because IsMultiRegNode() always returns true for COPY or RELOAD,
// even if its op1 is not multireg.
// Note that this method won't be called for non-register-producing nodes.
return 1;
}
else if (tree->OperIs(GT_CALL))
{
unsigned regCount = tree->AsCall()->GetReturnTypeDesc()->TryGetReturnRegCount();
// If it hasn't yet been initialized, we'd still like to see the registers printed.
if (regCount == 0)
{
regCount = MAX_RET_REG_COUNT;
}
return regCount;
}
else
{
return tree->GetMultiRegCount(this);
}
}
#endif // FEATURE_MULTIREG_RET
//----------------------------------------------------------------------------------
// gtDispRegVal: Print the register(s) defined by the given node
//
// Arguments:
// tree - Gentree node whose registers we want to print
//
void Compiler::gtDispRegVal(GenTree* tree)
{
switch (tree->GetRegTag())
{
// Don't display anything for the GT_REGTAG_NONE case;
// the absence of printed register values will imply this state.
case GenTree::GT_REGTAG_REG:
printf(" REG %s", compRegVarName(tree->GetRegNum()));
break;
default:
return;
}
#if FEATURE_MULTIREG_RET
if (tree->IsMultiRegNode())
{
// 0th reg is GetRegNum(), which is already printed above.
// Print the remaining regs of a multi-reg node.
unsigned regCount = gtDispMultiRegCount(tree);
// For some nodes, e.g. COPY, RELOAD or CALL, we may not have valid regs for all positions.
for (unsigned i = 1; i < regCount; ++i)
{
regNumber reg = tree->GetRegByIndex(i);
printf(",%s", genIsValidReg(reg) ? compRegVarName(reg) : "NA");
}
}
#endif
}
// We usually don't expect to print anything longer than this string.
#define LONGEST_COMMON_LCL_VAR_DISPLAY "V99 PInvokeFrame"
#define LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH (sizeof(LONGEST_COMMON_LCL_VAR_DISPLAY))
#define BUF_SIZE (LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH * 2)
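//------------------------------------------------------------------------
// gtGetLclVarNameInfo: Compute the IL-level kind, name, and number used when dumping a local.
//
// Arguments:
//    lclNum    - The local variable number
//    ilKindOut - [out] A kind prefix such as "tmp", "cse", "rat", "arg", or "loc", or nullptr
//    ilNameOut - [out] A well-known name such as "this", "RetBuf", or "OutArgs", or nullptr
//    ilNumOut  - [out] The IL-level number to print after the kind prefix
//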
void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut)
{
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = compMap2ILvarNum(lclNum);
if (ilNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM)
{
ilName = "RetBuf";
}
else if (ilNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM)
{
ilName = "VarArgHandle";
}
else if (ilNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM)
{
ilName = "TypeCtx";
}
else if (ilNum == (unsigned)ICorDebugInfo::UNKNOWN_ILNUM)
{
if (lclNumIsTrueCSE(lclNum))
{
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
else if (lclNum >= optCSEstart)
{
// Currently any new LclVars introduced after the CSE phase
// are believed to be created by the "rationalizer"; that is what is meant by the "rat" prefix.
ilKind = "rat";
ilNum = lclNum - (optCSEstart + optCSEcount);
}
else
{
if (lclNum == info.compLvFrameListRoot)
{
ilName = "FramesRoot";
}
else if (lclNum == lvaInlinedPInvokeFrameVar)
{
ilName = "PInvokeFrame";
}
else if (lclNum == lvaGSSecurityCookie)
{
ilName = "GsCookie";
}
else if (lclNum == lvaRetAddrVar)
{
ilName = "ReturnAddress";
}
#if FEATURE_FIXED_OUT_ARGS
else if (lclNum == lvaPInvokeFrameRegSaveVar)
{
ilName = "PInvokeFrameRegSave";
}
else if (lclNum == lvaOutgoingArgSpaceVar)
{
ilName = "OutArgs";
}
#endif // FEATURE_FIXED_OUT_ARGS
#if !defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaShadowSPslotsVar)
{
ilName = "EHSlots";
}
#endif // !FEATURE_EH_FUNCLETS
#ifdef JIT32_GCENCODER
else if (lclNum == lvaLocAllocSPvar)
{
ilName = "LocAllocSP";
}
#endif // JIT32_GCENCODER
#if defined(FEATURE_EH_FUNCLETS)
else if (lclNum == lvaPSPSym)
{
ilName = "PSPSym";
}
#endif // FEATURE_EH_FUNCLETS
else
{
ilKind = "tmp";
if (compIsForInlining())
{
ilNum = lclNum - impInlineInfo->InlinerCompiler->info.compLocalsCount;
}
else
{
ilNum = lclNum - info.compLocalsCount;
}
}
}
}
else if (lclNum < (compIsForInlining() ? impInlineInfo->InlinerCompiler->info.compArgsCount : info.compArgsCount))
{
if (ilNum == 0 && !info.compIsStatic)
{
ilName = "this";
}
else
{
ilKind = "arg";
}
}
else
{
if (!lvaTable[lclNum].lvIsStructField)
{
ilKind = "loc";
}
if (compIsForInlining())
{
ilNum -= impInlineInfo->InlinerCompiler->info.compILargsCount;
}
else
{
ilNum -= info.compILargsCount;
}
}
*ilKindOut = ilKind;
*ilNameOut = ilName;
*ilNumOut = ilNum;
}
/*****************************************************************************/
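//------------------------------------------------------------------------
// gtGetLclVarName: Format "V<lclNum>" followed by the local's IL name or kind/number into a buffer.
//
// Arguments:
//    lclNum        - The local variable number
//    buf           - The output buffer
//    buf_remaining - The size of the output buffer, in characters
//
// Return Value:
//    The number of characters printed, or a negative value if formatting failed.
//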
int Compiler::gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining)
{
char* bufp_next = buf;
unsigned charsPrinted = 0;
int sprintf_result;
sprintf_result = sprintf_s(bufp_next, buf_remaining, "V%02u", lclNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
const char* ilKind = nullptr;
const char* ilName = nullptr;
unsigned ilNum = 0;
gtGetLclVarNameInfo(lclNum, &ilKind, &ilName, &ilNum);
if (ilName != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s", ilName);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
else if (ilKind != nullptr)
{
sprintf_result = sprintf_s(bufp_next, buf_remaining, " %s%d", ilKind, ilNum);
if (sprintf_result < 0)
{
return sprintf_result;
}
charsPrinted += sprintf_result;
bufp_next += sprintf_result;
buf_remaining -= sprintf_result;
}
assert(charsPrinted > 0);
assert(buf_remaining > 0);
return (int)charsPrinted;
}
/*****************************************************************************
* Get the local var name, and create a copy of the string that can be used in debug output.
*/
char* Compiler::gtGetLclVarName(unsigned lclNum)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return nullptr;
}
char* retBuf = new (this, CMK_DebugOnly) char[charsPrinted + 1];
strcpy_s(retBuf, charsPrinted + 1, buf);
return retBuf;
}
/*****************************************************************************/
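//------------------------------------------------------------------------
// gtDispLclVar: Print the display name of a local variable, optionally padded to a fixed width.
//
// Arguments:
//    lclNum            - The local variable number
//    padForBiggestDisp - If true, pad the output to LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH characters
//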
void Compiler::gtDispLclVar(unsigned lclNum, bool padForBiggestDisp)
{
char buf[BUF_SIZE];
int charsPrinted = gtGetLclVarName(lclNum, buf, ArrLen(buf));
if (charsPrinted < 0)
{
return;
}
printf("%s", buf);
if (padForBiggestDisp && (charsPrinted < (int)LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH))
{
printf("%*c", LONGEST_COMMON_LCL_VAR_DISPLAY_LENGTH - charsPrinted, ' ');
}
}
//------------------------------------------------------------------------
// gtDispLclVarStructType: Print size and type information about a struct or lclBlk local variable.
//
// Arguments:
// lclNum - The local var id.
//
void Compiler::gtDispLclVarStructType(unsigned lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
var_types type = varDsc->TypeGet();
if (type == TYP_STRUCT)
{
ClassLayout* layout = varDsc->GetLayout();
assert(layout != nullptr);
gtDispClassLayout(layout, type);
}
else if (type == TYP_LCLBLK)
{
#if FEATURE_FIXED_OUT_ARGS
assert(lclNum == lvaOutgoingArgSpaceVar);
// Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until
// after we set it to something.
if (lvaOutgoingArgSpaceSize.HasFinalValue())
{
// A PhasedVar<T> can't be directly used as an arg to a variadic function
unsigned value = lvaOutgoingArgSpaceSize;
printf("<%u> ", value);
}
else
{
printf("<na> "); // The value hasn't yet been determined
}
#else
assert(!"Unknown size");
NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS
}
}
//------------------------------------------------------------------------
// gtDispClassLayout: Print size and type information about a layout.
//
// Arguments:
// layout - the layout;
// type - variable type, used to avoid printing size for SIMD nodes.
//
void Compiler::gtDispClassLayout(ClassLayout* layout, var_types type)
{
assert(layout != nullptr);
if (layout->IsBlockLayout())
{
printf("<%u>", layout->GetSize());
}
else if (varTypeIsSIMD(type))
{
printf("<%S>", layout->GetShortClassName());
}
else
{
printf("<%S, %u>", layout->GetShortClassName(), layout->GetSize());
}
}
/*****************************************************************************/
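//------------------------------------------------------------------------
// gtDispConst: Print the value (and any handle kind or field sequence) of a constant node.
//
// Arguments:
//    tree - The constant node (GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, or GT_CNS_STR) to print
//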
void Compiler::gtDispConst(GenTree* tree)
{
assert(tree->OperIsConst());
switch (tree->gtOper)
{
case GT_CNS_INT:
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
const WCHAR* str = eeGetCPString(tree->AsIntCon()->gtIconVal);
// If *str points to a '\0' then don't print the string's values
if ((str != nullptr) && (*str != '\0'))
{
printf(" 0x%X \"%S\"", dspPtr(tree->AsIntCon()->gtIconVal), str);
}
else // We can't print the value of the string
{
// Note that eeGetCPString isn't currently implemented on Linux/ARM
// and instead always returns nullptr
printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
}
}
else
{
ssize_t dspIconVal =
tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
if (tree->TypeGet() == TYP_REF)
{
assert(tree->AsIntCon()->gtIconVal == 0);
printf(" null");
}
else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
printf(" 0x%llx", dspIconVal);
}
else
{
printf(" -0x%llx", -dspIconVal);
}
}
#endif
else
{
if (dspIconVal >= 0)
{
printf(" 0x%X", dspIconVal);
}
else
{
printf(" -0x%X", -dspIconVal);
}
}
if (tree->IsIconHandle())
{
switch (tree->GetIconHandleFlag())
{
case GTF_ICON_SCOPE_HDL:
printf(" scope");
break;
case GTF_ICON_CLASS_HDL:
printf(" class");
break;
case GTF_ICON_METHOD_HDL:
printf(" method");
break;
case GTF_ICON_FIELD_HDL:
printf(" field");
break;
case GTF_ICON_STATIC_HDL:
printf(" static");
break;
case GTF_ICON_STR_HDL:
unreached(); // This case is handled above
break;
case GTF_ICON_CONST_PTR:
printf(" const ptr");
break;
case GTF_ICON_GLOBAL_PTR:
printf(" global ptr");
break;
case GTF_ICON_VARG_HDL:
printf(" vararg");
break;
case GTF_ICON_PINVKI_HDL:
printf(" pinvoke");
break;
case GTF_ICON_TOKEN_HDL:
printf(" token");
break;
case GTF_ICON_TLS_HDL:
printf(" tls");
break;
case GTF_ICON_FTN_ADDR:
printf(" ftn");
break;
case GTF_ICON_CIDMID_HDL:
printf(" cid/mid");
break;
case GTF_ICON_BBC_PTR:
printf(" bbc");
break;
case GTF_ICON_STATIC_BOX_PTR:
printf(" static box ptr");
break;
default:
printf(" UNKNOWN");
break;
}
}
if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
{
printf(" field offset");
}
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
printf(" vector element count");
}
#endif
if (tree->IsReuseRegVal())
{
printf(" reuse reg val");
}
}
gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
break;
case GT_CNS_LNG:
printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
break;
case GT_CNS_DBL:
if (*((__int64*)&tree->AsDblCon()->gtDconVal) == (__int64)I64(0x8000000000000000))
{
printf(" -0.00000");
}
else
{
printf(" %#.17g", tree->AsDblCon()->gtDconVal);
}
break;
case GT_CNS_STR:
printf("<string constant>");
break;
default:
assert(!"unexpected constant node");
}
}
//------------------------------------------------------------------------
// gtDispFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
//
// Useful for printing zero-offset field sequences.
//
void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
{
if (fieldSeq == FieldSeqStore::NotAField())
{
printf(" Fseq<NotAField>");
return;
}
gtDispFieldSeq(fieldSeq);
}
//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
{
if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
{
return;
}
// Otherwise...
printf(" Fseq[");
while (pfsn != nullptr)
{
assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
CORINFO_FIELD_HANDLE fldHnd = pfsn->GetFieldHandleValue();
// First check the "pseudo" field handles...
if (fldHnd == FieldSeqStore::FirstElemPseudoField)
{
printf("#FirstElem");
}
else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
{
printf("#ConstantIndex");
}
else
{
printf("%s", eeGetFieldName(fldHnd));
}
pfsn = pfsn->GetNext();
if (pfsn != nullptr)
{
printf(", ");
}
}
printf("]");
}
//------------------------------------------------------------------------
// gtDispLeaf: Print a single leaf node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
void Compiler::gtDispLeaf(GenTree* tree, IndentStack* indentStack)
{
if (tree->OperIsConst())
{
gtDispConst(tree);
return;
}
bool isLclFld = false;
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_FLD_ADDR:
case GT_STORE_LCL_FLD:
isLclFld = true;
FALLTHROUGH;
case GT_PHI_ARG:
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_STORE_LCL_VAR:
{
printf(" ");
const unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(varNum);
gtDispLclVar(varNum);
if (tree->AsLclVarCommon()->HasSsaName())
{
if (tree->gtFlags & GTF_VAR_USEASG)
{
assert(tree->gtFlags & GTF_VAR_DEF);
printf("ud:%d->%d", tree->AsLclVarCommon()->GetSsaNum(), GetSsaNumForLocalVarDef(tree));
}
else
{
printf("%s:%d", (tree->gtFlags & GTF_VAR_DEF) ? "d" : "u", tree->AsLclVarCommon()->GetSsaNum());
}
}
if (isLclFld)
{
printf("[+%u]", tree->AsLclFld()->GetLclOffs());
gtDispFieldSeq(tree->AsLclFld()->GetFieldSeq());
}
if (varDsc->lvRegister)
{
printf(" ");
varDsc->PrintVarReg();
}
else if (tree->InReg())
{
printf(" %s", compRegVarName(tree->GetRegNum()));
}
if (varDsc->lvPromoted)
{
if (!varTypeIsPromotable(varDsc) && !varDsc->lvUnusedStruct)
{
// Promoted implicit byrefs can get in this state while they are being rewritten
// in global morph.
}
else
{
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(i);
const char* fieldName;
#if !defined(TARGET_64BIT)
if (varTypeIsLong(varDsc))
{
fieldName = (i == 0) ? "lo" : "hi";
}
else
#endif // !defined(TARGET_64BIT)
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
CORINFO_FIELD_HANDLE fldHnd =
info.compCompHnd->getFieldInClass(typeHnd, fieldVarDsc->lvFldOrdinal);
fieldName = eeGetFieldName(fldHnd);
}
printf("\n");
printf(" ");
printIndent(indentStack);
printf(" %-6s V%02u.%s (offs=0x%02x) -> ", varTypeName(fieldVarDsc->TypeGet()),
tree->AsLclVarCommon()->GetLclNum(), fieldName, fieldVarDsc->lvFldOffset);
gtDispLclVar(i);
if (fieldVarDsc->lvRegister)
{
printf(" ");
fieldVarDsc->PrintVarReg();
}
if (fieldVarDsc->lvTracked && fgLocalVarLivenessDone && tree->IsMultiRegLclVar() &&
tree->AsLclVar()->IsLastUse(i - varDsc->lvFieldLclStart))
{
printf(" (last use)");
}
}
}
}
else // a normal not-promoted lclvar
{
if (varDsc->lvTracked && fgLocalVarLivenessDone && ((tree->gtFlags & GTF_VAR_DEATH) != 0))
{
printf(" (last use)");
}
}
}
break;
case GT_JMP:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsVal()->gtVal1, &className);
printf(" %s.%s\n", className, methodName);
}
break;
case GT_CLS_VAR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
gtDispFieldSeq(tree->AsClsVar()->gtFieldSeq);
break;
case GT_CLS_VAR_ADDR:
printf(" Hnd=%#x", dspPtr(tree->AsClsVar()->gtClsVarHnd));
break;
case GT_LABEL:
break;
case GT_FTN_ADDR:
{
const char* methodName;
const char* className;
methodName = eeGetMethodName((CORINFO_METHOD_HANDLE)tree->AsFptrVal()->gtFptrMethod, &className);
printf(" %s.%s\n", className, methodName);
}
break;
#if !defined(FEATURE_EH_FUNCLETS)
case GT_END_LFIN:
printf(" endNstLvl=%d", tree->AsVal()->gtVal1);
break;
#endif // !FEATURE_EH_FUNCLETS
// Vanilla leaves. No qualifying information available. So do nothing
case GT_NO_OP:
case GT_START_NONGC:
case GT_START_PREEMPTGC:
case GT_PROF_HOOK:
case GT_CATCH_ARG:
case GT_MEMORYBARRIER:
case GT_ARGPLACE:
case GT_PINVOKE_PROLOG:
case GT_JMPTABLE:
break;
case GT_RET_EXPR:
{
GenTree* const associatedTree = tree->AsRetExpr()->gtInlineCandidate;
printf("(inl return %s ", tree->IsCall() ? " from call" : "expr");
printTreeID(associatedTree);
printf(")");
}
break;
case GT_PHYSREG:
printf(" %s", getRegName(tree->AsPhysReg()->gtSrcReg));
break;
case GT_IL_OFFSET:
printf(" ");
tree->AsILOffset()->gtStmtDI.Dump(true);
break;
case GT_JCC:
case GT_SETCC:
printf(" cond=%s", tree->AsCC()->gtCondition.Name());
break;
case GT_JCMP:
printf(" cond=%s%s", (tree->gtFlags & GTF_JCMP_TST) ? "TEST_" : "",
(tree->gtFlags & GTF_JCMP_EQ) ? "EQ" : "NE");
break;
default:
assert(!"don't know how to display tree leaf node");
}
}
//------------------------------------------------------------------------
// gtDispChild: Print a child node to jitstdout.
//
// Arguments:
// tree - the tree to be printed
// indentStack - the specification for the current level of indentation & arcs
// arcType - the type of arc to use for this child
// msg - a contextual message (i.e. from the parent) to print
// topOnly - a boolean indicating whether to print the children, or just the top node
//
// Return Value:
// None.
//
// Notes:
// 'indentStack' may be null, in which case no indentation or arcs are printed
// 'msg' has a default value of null
// 'topOnly' is an optional argument that defaults to false
void Compiler::gtDispChild(GenTree* child,
IndentStack* indentStack,
IndentInfo arcType,
_In_opt_ const char* msg, /* = nullptr */
bool topOnly) /* = false */
{
indentStack->Push(arcType);
gtDispTree(child, indentStack, msg, topOnly);
indentStack->Pop();
}
#ifdef FEATURE_SIMD
// Intrinsic Id to name map
extern const char* const simdIntrinsicNames[] = {
#define SIMD_INTRINSIC(mname, inst, id, name, r, ac, arg1, arg2, arg3, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) name,
#include "simdintrinsiclist.h"
};
#endif // FEATURE_SIMD
/*****************************************************************************/
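//------------------------------------------------------------------------
// gtDispTree: Print a tree (and, unless 'topOnly' is set, its children) to jitstdout.
//
// Arguments:
//    tree        - The tree to be printed; may be null
//    indentStack - The specification for the current level of indentation & arcs; may be null
//    msg         - A contextual message to print with the node; may be null
//    topOnly     - If true, print only the top node and not its children
//    isLIR       - True if the node is being displayed as part of LIR
//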
void Compiler::gtDispTree(GenTree* tree,
IndentStack* indentStack, /* = nullptr */
_In_ _In_opt_z_ const char* msg, /* = nullptr */
bool topOnly, /* = false */
bool isLIR) /* = false */
{
if (tree == nullptr)
{
printf(" [%08X] <NULL>\n", tree);
printf(""); // null string means flush
return;
}
if (indentStack == nullptr)
{
indentStack = new (this, CMK_DebugOnly) IndentStack(this);
}
if (IsUninitialized(tree))
{
/* Value used to initialize nodes */
printf("Uninitialized tree node!\n");
return;
}
if (tree->gtOper >= GT_COUNT)
{
gtDispNode(tree, indentStack, msg, isLIR);
printf("Bogus operator!\n");
return;
}
/* Is tree a leaf node? */
if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
{
gtDispNode(tree, indentStack, msg, isLIR);
gtDispLeaf(tree, indentStack);
gtDispCommonEndLine(tree);
if (tree->OperIsLocalStore() && !topOnly)
{
gtDispChild(tree->AsOp()->gtOp1, indentStack, IINone);
}
return;
}
// Determine what kind of arc to propagate.
IndentInfo myArc = IINone;
IndentInfo lowerArc = IINone;
if (indentStack->Depth() > 0)
{
myArc = indentStack->Pop();
switch (myArc)
{
case IIArcBottom:
indentStack->Push(IIArc);
lowerArc = IINone;
break;
case IIArc:
indentStack->Push(IIArc);
lowerArc = IIArc;
break;
case IIArcTop:
indentStack->Push(IINone);
lowerArc = IIArc;
break;
case IINone:
indentStack->Push(IINone);
lowerArc = IINone;
break;
default:
unreached();
break;
}
}
/* Is it a 'simple' unary/binary operator? */
const char* childMsg = nullptr;
if (tree->OperIsSimple())
{
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
if (tree->gtOper == GT_CAST)
{
/* Format a message that explains the effect of this GT_CAST */
var_types fromType = genActualType(tree->AsCast()->CastOp()->TypeGet());
var_types toType = tree->CastToType();
var_types finalType = tree->TypeGet();
/* if GTF_UNSIGNED is set then force fromType to an unsigned type */
if (tree->gtFlags & GTF_UNSIGNED)
{
fromType = varTypeToUnsigned(fromType);
}
if (finalType != toType)
{
printf(" %s <-", varTypeName(finalType));
}
printf(" %s <- %s", varTypeName(toType), varTypeName(fromType));
}
if (tree->OperIsBlkOp())
{
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
if (tree->OperIsStoreBlk() && (tree->AsBlk()->gtBlkOpKind != GenTreeBlk::BlkOpKindInvalid))
{
switch (tree->AsBlk()->gtBlkOpKind)
{
#ifdef TARGET_XARCH
case GenTreeBlk::BlkOpKindRepInstr:
printf(" (RepInstr)");
break;
#endif
case GenTreeBlk::BlkOpKindUnroll:
printf(" (Unroll)");
break;
#ifndef TARGET_X86
case GenTreeBlk::BlkOpKindHelper:
printf(" (Helper)");
break;
#endif
default:
unreached();
}
}
}
#if FEATURE_PUT_STRUCT_ARG_STK
else if (tree->OperGet() == GT_PUTARG_STK)
{
const GenTreePutArgStk* putArg = tree->AsPutArgStk();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d byteOffset)", putArg->GetStackByteSize(), putArg->getArgOffset());
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d slot), (%d byteOffset)", putArg->gtNumSlots,
putArg->GetStackByteSize(), putArg->gtSlotNum, putArg->getArgOffset());
}
#endif
if (putArg->gtPutArgStkKind != GenTreePutArgStk::Kind::Invalid)
{
switch (putArg->gtPutArgStkKind)
{
case GenTreePutArgStk::Kind::RepInstr:
printf(" (RepInstr)");
break;
case GenTreePutArgStk::Kind::PartialRepInstr:
printf(" (PartialRepInstr)");
break;
case GenTreePutArgStk::Kind::Unroll:
printf(" (Unroll)");
break;
case GenTreePutArgStk::Kind::Push:
printf(" (Push)");
break;
case GenTreePutArgStk::Kind::PushAllSlots:
printf(" (PushAllSlots)");
break;
default:
unreached();
}
}
}
#if FEATURE_ARG_SPLIT
else if (tree->OperGet() == GT_PUTARG_SPLIT)
{
const GenTreePutArgSplit* putArg = tree->AsPutArgSplit();
#if !defined(DEBUG_ARG_SLOTS)
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
#else
if (compMacOsArm64Abi())
{
printf(" (%d stackByteSize), (%d numRegs)", putArg->GetStackByteSize(), putArg->gtNumRegs);
}
else
{
printf(" (%d slots), (%d stackByteSize), (%d numRegs)", putArg->gtNumSlots, putArg->GetStackByteSize(),
putArg->gtNumRegs);
}
#endif
}
#endif // FEATURE_ARG_SPLIT
#endif // FEATURE_PUT_STRUCT_ARG_STK
if (tree->OperIs(GT_FIELD))
{
if (FieldSeqStore::IsPseudoField(tree->AsField()->gtFldHnd))
{
printf(" #PseudoField:0x%x", tree->AsField()->gtFldOffset);
}
else
{
printf(" %s", eeGetFieldName(tree->AsField()->gtFldHnd), 0);
}
}
if (tree->gtOper == GT_INTRINSIC)
{
GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
switch (intrinsic->gtIntrinsicName)
{
case NI_System_Math_Abs:
printf(" abs");
break;
case NI_System_Math_Acos:
printf(" acos");
break;
case NI_System_Math_Acosh:
printf(" acosh");
break;
case NI_System_Math_Asin:
printf(" asin");
break;
case NI_System_Math_Asinh:
printf(" asinh");
break;
case NI_System_Math_Atan:
printf(" atan");
break;
case NI_System_Math_Atanh:
printf(" atanh");
break;
case NI_System_Math_Atan2:
printf(" atan2");
break;
case NI_System_Math_Cbrt:
printf(" cbrt");
break;
case NI_System_Math_Ceiling:
printf(" ceiling");
break;
case NI_System_Math_Cos:
printf(" cos");
break;
case NI_System_Math_Cosh:
printf(" cosh");
break;
case NI_System_Math_Exp:
printf(" exp");
break;
case NI_System_Math_Floor:
printf(" floor");
break;
case NI_System_Math_FMod:
printf(" fmod");
break;
case NI_System_Math_FusedMultiplyAdd:
printf(" fma");
break;
case NI_System_Math_ILogB:
printf(" ilogb");
break;
case NI_System_Math_Log:
printf(" log");
break;
case NI_System_Math_Log2:
printf(" log2");
break;
case NI_System_Math_Log10:
printf(" log10");
break;
case NI_System_Math_Max:
printf(" max");
break;
case NI_System_Math_Min:
printf(" min");
break;
case NI_System_Math_Pow:
printf(" pow");
break;
case NI_System_Math_Round:
printf(" round");
break;
case NI_System_Math_Sin:
printf(" sin");
break;
case NI_System_Math_Sinh:
printf(" sinh");
break;
case NI_System_Math_Sqrt:
printf(" sqrt");
break;
case NI_System_Math_Tan:
printf(" tan");
break;
case NI_System_Math_Tanh:
printf(" tanh");
break;
case NI_System_Math_Truncate:
printf(" truncate");
break;
case NI_System_Object_GetType:
printf(" objGetType");
break;
case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant:
printf(" isKnownConst");
break;
default:
unreached();
}
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
if (tree->AsOp()->gtOp1 != nullptr)
{
// Label the child of the GT_COLON operator
// op1 is the else part
if (tree->gtOper == GT_COLON)
{
childMsg = "else";
}
else if (tree->gtOper == GT_QMARK)
{
childMsg = " if";
}
gtDispChild(tree->AsOp()->gtOp1, indentStack,
(tree->gtGetOp2IfPresent() == nullptr) ? IIArcBottom : IIArc, childMsg, topOnly);
}
if (tree->gtGetOp2IfPresent())
{
// Label the childMsgs of the GT_COLON operator
// op2 is the then part
if (tree->gtOper == GT_COLON)
{
childMsg = "then";
}
gtDispChild(tree->AsOp()->gtOp2, indentStack, IIArcBottom, childMsg, topOnly);
}
}
return;
}
// Now, get the right type of arc for this node
if (myArc != IINone)
{
indentStack->Pop();
indentStack->Push(myArc);
}
gtDispNode(tree, indentStack, msg, isLIR);
// Propagate lowerArc to the lower children.
if (indentStack->Depth() > 0)
{
(void)indentStack->Pop();
indentStack->Push(lowerArc);
}
// See what kind of a special operator we have here, and handle its special children.
switch (tree->gtOper)
{
case GT_FIELD_LIST:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
char offset[32];
sprintf_s(offset, sizeof(offset), "ofs %u", use.GetOffset());
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, offset);
}
}
break;
case GT_PHI:
gtDispCommonEndLine(tree);
if (!topOnly)
{
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
char block[32];
sprintf_s(block, sizeof(block), "pred " FMT_BB, use.GetNode()->AsPhiArg()->gtPredBB->bbNum);
gtDispChild(use.GetNode(), indentStack, (use.GetNext() == nullptr) ? IIArcBottom : IIArc, block);
}
}
break;
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
GenTree* lastChild = nullptr;
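// Walk the call's operands to find the last one, so that child can be drawn with a bottom arc.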
call->VisitOperands([&lastChild](GenTree* operand) -> GenTree::VisitResult {
lastChild = operand;
return GenTree::VisitResult::Continue;
});
if (call->gtCallType != CT_INDIRECT)
{
const char* methodName;
const char* className;
methodName = eeGetMethodName(call->gtCallMethHnd, &className);
printf(" %s.%s", className, methodName);
}
if ((call->gtFlags & GTF_CALL_UNMANAGED) && (call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH))
{
printf(" (FramesRoot last use)");
}
if (((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0) && (call->gtInlineCandidateInfo != nullptr) &&
(call->gtInlineCandidateInfo->exactContextHnd != nullptr))
{
printf(" (exactContextHnd=0x%p)", dspPtr(call->gtInlineCandidateInfo->exactContextHnd));
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
char buf[64];
char* bufp;
bufp = &buf[0];
if ((call->gtCallThisArg != nullptr) && !call->gtCallThisArg->GetNode()->OperIs(GT_NOP, GT_ARGPLACE))
{
if (call->gtCallThisArg->GetNode()->OperIs(GT_ASG))
{
sprintf_s(bufp, sizeof(buf), "this SETUP%c", 0);
}
else
{
sprintf_s(bufp, sizeof(buf), "this in %s%c", compRegVarName(REG_ARG_0), 0);
}
gtDispChild(call->gtCallThisArg->GetNode(), indentStack,
(call->gtCallThisArg->GetNode() == lastChild) ? IIArcBottom : IIArc, bufp, topOnly);
}
if (call->gtCallArgs)
{
gtDispArgList(call, lastChild, indentStack);
}
if (call->gtCallType == CT_INDIRECT)
{
gtDispChild(call->gtCallAddr, indentStack, (call->gtCallAddr == lastChild) ? IIArcBottom : IIArc,
"calli tgt", topOnly);
}
if (call->gtControlExpr != nullptr)
{
gtDispChild(call->gtControlExpr, indentStack,
(call->gtControlExpr == lastChild) ? IIArcBottom : IIArc, "control expr", topOnly);
}
int lateArgIndex = 0;
for (GenTreeCall::Use& use : call->LateArgs())
{
IndentInfo arcType = (use.GetNext() == nullptr) ? IIArcBottom : IIArc;
gtGetLateArgMsg(call, use.GetNode(), lateArgIndex, bufp, sizeof(buf));
gtDispChild(use.GetNode(), indentStack, arcType, bufp, topOnly);
lateArgIndex++;
}
}
}
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
#if defined(FEATURE_SIMD)
if (tree->OperIs(GT_SIMD))
{
printf(" %s %s", varTypeName(tree->AsSIMD()->GetSimdBaseType()),
simdIntrinsicNames[tree->AsSIMD()->GetSIMDIntrinsicId()]);
}
#endif // defined(FEATURE_SIMD)
#if defined(FEATURE_HW_INTRINSICS)
if (tree->OperIs(GT_HWINTRINSIC))
{
printf(" %s %s", tree->AsHWIntrinsic()->GetSimdBaseType() == TYP_UNKNOWN
? ""
: varTypeName(tree->AsHWIntrinsic()->GetSimdBaseType()),
HWIntrinsicInfo::lookupName(tree->AsHWIntrinsic()->GetHWIntrinsicId()));
}
#endif // defined(FEATURE_HW_INTRINSICS)
gtDispCommonEndLine(tree);
if (!topOnly)
{
size_t index = 0;
size_t count = tree->AsMultiOp()->GetOperandCount();
for (GenTree* operand : tree->AsMultiOp()->Operands())
{
gtDispChild(operand, indentStack, ++index < count ? IIArc : IIArcBottom, nullptr, topOnly);
}
}
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrElem()->gtArrObj, indentStack, IIArc, nullptr, topOnly);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
IndentInfo arcType = ((dim + 1) == tree->AsArrElem()->gtArrRank) ? IIArcBottom : IIArc;
gtDispChild(tree->AsArrElem()->gtArrInds[dim], indentStack, arcType, nullptr, topOnly);
}
}
break;
case GT_ARR_OFFSET:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsArrOffs()->gtOffset, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtIndex, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsArrOffs()->gtArrObj, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_CMPXCHG:
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsCmpXchg()->gtOpLocation, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpValue, indentStack, IIArc, nullptr, topOnly);
gtDispChild(tree->AsCmpXchg()->gtOpComparand, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
case GT_STORE_DYN_BLK:
if (tree->OperIsCopyBlkOp())
{
printf(" (copy)");
}
else if (tree->OperIsInitBlkOp())
{
printf(" (init)");
}
gtDispCommonEndLine(tree);
if (!topOnly)
{
gtDispChild(tree->AsStoreDynBlk()->Addr(), indentStack, IIArc, nullptr, topOnly);
if (tree->AsStoreDynBlk()->Data() != nullptr)
{
gtDispChild(tree->AsStoreDynBlk()->Data(), indentStack, IIArc, nullptr, topOnly);
}
gtDispChild(tree->AsStoreDynBlk()->gtDynamicSize, indentStack, IIArcBottom, nullptr, topOnly);
}
break;
default:
printf("<DON'T KNOW HOW TO DISPLAY THIS NODE> :");
printf(""); // null string means flush
break;
}
}
//------------------------------------------------------------------------
// gtGetArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// arg - The argument for which a message should be constructed
// argNum - The ordinal number of the arg in the argument list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength)
{
if (call->gtCallLateArgs != nullptr)
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(call, argNum);
assert(curArgTabEntry);
if (arg->gtFlags & GTF_LATE_ARG)
{
sprintf_s(bufp, bufLength, "arg%d SETUP%c", argNum, 0);
}
else
{
#ifdef TARGET_ARM
if (curArgTabEntry->IsSplit())
{
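// A split argument is passed partly in registers and partly in the outgoing argument area;
// print the register (or register range) and the stack offset.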
regNumber firstReg = curArgTabEntry->GetRegNum();
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
}
}
else
{
sprintf_s(bufp, bufLength, "arg%d%c", argNum, 0);
}
}
//------------------------------------------------------------------------
// gtGetLateArgMsg: Construct a message about the given argument
//
// Arguments:
// call - The call for which 'arg' is an argument
// argx - The argument for which a message should be constructed
// lateArgIndex - The ordinal number of the arg in the lastArg list
// bufp - A pointer to the buffer into which the message is written
// bufLength - The length of the buffer pointed to by bufp
//
// Return Value:
// No return value, but bufp is written.
//
// Assumptions:
// 'call' must be a call node
// 'arg' must be an argument to 'call' (else gtArgEntryByNode will assert)
void Compiler::gtGetLateArgMsg(GenTreeCall* call, GenTree* argx, int lateArgIndex, char* bufp, unsigned bufLength)
{
assert(!argx->IsArgPlaceHolderNode()); // No placeholder nodes are in gtCallLateArgs.
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(call, lateArgIndex);
assert(curArgTabEntry);
regNumber argReg = curArgTabEntry->GetRegNum();
#if FEATURE_FIXED_OUT_ARGS
if (argReg == REG_STK)
{
sprintf_s(bufp, bufLength, "arg%d in out+%02x%c", curArgTabEntry->argNum, curArgTabEntry->GetByteOffset(), 0);
}
else
#endif
{
if (curArgTabEntry->use == call->gtCallThisArg)
{
sprintf_s(bufp, bufLength, "this in %s%c", compRegVarName(argReg), 0);
}
#ifdef TARGET_ARM
else if (curArgTabEntry->IsSplit())
{
regNumber firstReg = curArgTabEntry->GetRegNum();
unsigned argNum = curArgTabEntry->argNum;
if (curArgTabEntry->numRegs == 1)
{
sprintf_s(bufp, bufLength, "arg%d %s out+%02x%c", argNum, compRegVarName(firstReg),
(curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
else
{
regNumber lastReg = REG_STK;
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
if (curArgTabEntry->IsHfaRegArg())
{
unsigned lastRegNum = genMapFloatRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapFloatRegArgNumToRegNum(lastRegNum);
}
else
{
unsigned lastRegNum = genMapIntRegNumToRegArgNum(firstReg) + curArgTabEntry->numRegs - 1;
lastReg = genMapIntRegArgNumToRegNum(lastRegNum);
}
sprintf_s(bufp, bufLength, "arg%d %s%c%s out+%02x%c", argNum, compRegVarName(firstReg), separator,
compRegVarName(lastReg), (curArgTabEntry->slotNum) * TARGET_POINTER_SIZE, 0);
}
return;
}
#endif // TARGET_ARM
else
{
#if FEATURE_MULTIREG_ARGS
if (curArgTabEntry->numRegs >= 2)
{
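// Multi-register argument: print the first and last registers of the range
// (',' separates exactly two registers, '-' denotes a longer range).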
char separator = (curArgTabEntry->numRegs == 2) ? ',' : '-';
sprintf_s(bufp, bufLength, "arg%d %s%c%s%c", curArgTabEntry->argNum, compRegVarName(argReg), separator,
compRegVarName(curArgTabEntry->GetRegNum(curArgTabEntry->numRegs - 1)), 0);
}
else
#endif
{
sprintf_s(bufp, bufLength, "arg%d in %s%c", curArgTabEntry->argNum, compRegVarName(argReg), 0);
}
}
}
}
//------------------------------------------------------------------------
// gtDispArgList: Dump the tree for a call arg list
//
// Arguments:
// call - the call to dump arguments for
// lastCallOperand - the call's last operand (to determine the arc types)
// indentStack - the specification for the current level of indentation & arcs
//
// Return Value:
// None.
//
void Compiler::gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack)
{
unsigned argnum = 0;
if (call->gtCallThisArg != nullptr)
{
argnum++;
}
for (GenTreeCall::Use& use : call->Args())
{
GenTree* argNode = use.GetNode();
if (!argNode->IsNothingNode() && !argNode->IsArgPlaceHolderNode())
{
char buf[256];
gtGetArgMsg(call, argNode, argnum, buf, sizeof(buf));
gtDispChild(argNode, indentStack, (argNode == lastCallOperand) ? IIArcBottom : IIArc, buf, false);
}
argnum++;
}
}
//------------------------------------------------------------------------
// gtDispStmt: Print a statement to jitstdout.
//
// Arguments:
// stmt - the statement to be printed;
// msg - an additional message to print before the statement.
//
void Compiler::gtDispStmt(Statement* stmt, const char* msg /* = nullptr */)
{
if (opts.compDbgInfo)
{
if (msg != nullptr)
{
printf("%s ", msg);
}
printStmtID(stmt);
printf(" ( ");
const DebugInfo& di = stmt->GetDebugInfo();
// For statements in the root we display just the location without the
// inline context info.
if (di.GetInlineContext() == nullptr || di.GetInlineContext()->IsRoot())
{
di.GetLocation().Dump();
}
else
{
stmt->GetDebugInfo().Dump(false);
}
printf(" ... ");
IL_OFFSET lastILOffs = stmt->GetLastILOffset();
if (lastILOffs == BAD_IL_OFFSET)
{
printf("???");
}
else
{
printf("0x%03X", lastILOffs);
}
printf(" )");
DebugInfo par;
if (stmt->GetDebugInfo().GetParent(&par))
{
printf(" <- ");
par.Dump(true);
}
printf("\n");
}
gtDispTree(stmt->GetRootNode());
}
//------------------------------------------------------------------------
// gtDispBlockStmts: dumps all statements inside `block`.
//
// Arguments:
// block - the block to display statements for.
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
}
}
//------------------------------------------------------------------------
// Compiler::gtDispRange: dumps a range of LIR.
//
// Arguments:
// range - the range of LIR to display.
//
void Compiler::gtDispRange(LIR::ReadOnlyRange const& range)
{
for (GenTree* node : range)
{
gtDispLIRNode(node);
}
}
//------------------------------------------------------------------------
// Compiler::gtDispTreeRange: dumps the LIR range that contains all of the
// nodes in the dataflow tree rooted at a given
// node.
//
// Arguments:
// containingRange - the LIR range that contains the root node.
// tree - the root of the dataflow tree.
//
void Compiler::gtDispTreeRange(LIR::Range& containingRange, GenTree* tree)
{
bool unused;
gtDispRange(containingRange.GetTreeRange(tree, &unused));
}
//------------------------------------------------------------------------
// Compiler::gtDispLIRNode: dumps a single LIR node.
//
// Arguments:
// node - the LIR node to dump.
// prefixMsg - an optional prefix for each line of output.
//
void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr */)
{
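// displayOperand prints a single operand line: optional prefix padding, alignment spaces,
// the indentation arcs, and then the operand's tree ID, type, and message.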
auto displayOperand = [](GenTree* operand, const char* message, IndentInfo operandArc, IndentStack& indentStack,
size_t prefixIndent) {
assert(operand != nullptr);
assert(message != nullptr);
if (prefixIndent != 0)
{
printf("%*s", (int)prefixIndent, "");
}
// 50 spaces for alignment
printf("%-50s", "");
#if FEATURE_SET_FLAGS
// additional flag enlarges the flag field by one character
printf(" ");
#endif
indentStack.Push(operandArc);
indentStack.print();
indentStack.Pop();
operandArc = IIArc;
printf(" t%-5d %-6s %s\n", operand->gtTreeID, varTypeName(operand->TypeGet()), message);
};
IndentStack indentStack(this);
size_t prefixIndent = 0;
if (prefixMsg != nullptr)
{
prefixIndent = strlen(prefixMsg);
}
const int bufLength = 256;
char buf[bufLength];
const bool nodeIsCall = node->IsCall();
// Visit operands
IndentInfo operandArc = IIArcTop;
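// The first operand printed gets a top arc; each later operand gets a plain arc
// (operandArc is updated at the bottom of the loop).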
for (GenTree* operand : node->Operands())
{
if (operand->IsArgPlaceHolderNode() || !operand->IsValue())
{
// Either of these situations may happen with calls.
continue;
}
if (nodeIsCall)
{
GenTreeCall* call = node->AsCall();
if ((call->gtCallThisArg != nullptr) && (operand == call->gtCallThisArg->GetNode()))
{
sprintf_s(buf, sizeof(buf), "this in %s", compRegVarName(REG_ARG_0));
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallAddr)
{
displayOperand(operand, "calli tgt", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtControlExpr)
{
displayOperand(operand, "control expr", operandArc, indentStack, prefixIndent);
}
else if (operand == call->gtCallCookie)
{
displayOperand(operand, "cookie", operandArc, indentStack, prefixIndent);
}
else
{
fgArgTabEntry* curArgTabEntry = gtArgEntryByNode(call, operand);
assert(curArgTabEntry);
if (!curArgTabEntry->isLateArg())
{
gtGetArgMsg(call, operand, curArgTabEntry->argNum, buf, sizeof(buf));
}
else
{
gtGetLateArgMsg(call, operand, curArgTabEntry->GetLateArgInx(), buf, sizeof(buf));
}
displayOperand(operand, buf, operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_STORE_DYN_BLK))
{
if (operand == node->AsBlk()->Addr())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else if (operand == node->AsBlk()->Data())
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
else
{
assert(operand == node->AsStoreDynBlk()->gtDynamicSize);
displayOperand(operand, "size", operandArc, indentStack, prefixIndent);
}
}
else if (node->OperIs(GT_ASG))
{
if (operand == node->gtGetOp1())
{
displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent);
}
else
{
displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent);
}
}
else
{
displayOperand(operand, "", operandArc, indentStack, prefixIndent);
}
operandArc = IIArc;
}
// Visit the operator
if (prefixMsg != nullptr)
{
printf("%s", prefixMsg);
}
const bool topOnly = true;
const bool isLIR = true;
gtDispTree(node, &indentStack, nullptr, topOnly, isLIR);
}
/*****************************************************************************/
#endif // DEBUG
/*****************************************************************************
*
* Check if the given node can be folded,
* and call the methods to perform the folding
*/
GenTree* Compiler::gtFoldExpr(GenTree* tree)
{
unsigned kind = tree->OperKind();
/* We must have a simple operation to fold */
// If we're in CSE, it's not safe to perform tree
// folding given that it can potentially change
// the set of considered CSE candidates.
if (optValnumCSE_phase)
{
return tree;
}
if (!(kind & GTK_SMPOP))
{
return tree;
}
GenTree* op1 = tree->AsOp()->gtOp1;
/* Filter out non-foldable trees that can have constant children */
assert(kind & (GTK_UNOP | GTK_BINOP));
switch (tree->gtOper)
{
case GT_RETFILT:
case GT_RETURN:
case GT_IND:
return tree;
default:
break;
}
/* try to fold the current node */
if ((kind & GTK_UNOP) && op1)
{
if (op1->OperIsConst())
{
return gtFoldExprConst(tree);
}
}
else if ((kind & GTK_BINOP) && op1 && tree->AsOp()->gtOp2 &&
// Don't take out conditionals for debugging
(opts.OptimizationEnabled() || !tree->OperIsCompare()))
{
GenTree* op2 = tree->AsOp()->gtOp2;
// The atomic operations are exempted here because they are never computable statically;
// one of their arguments is an address.
if (op1->OperIsConst() && op2->OperIsConst() && !tree->OperIsAtomicOp())
{
/* both nodes are constants - fold the expression */
return gtFoldExprConst(tree);
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
/* at least one is a constant - see if we have a
* special operator that can use only one constant
* to fold - e.g. booleans */
return gtFoldExprSpecial(tree);
}
else if (tree->OperIsCompare())
{
/* comparisons of two local variables can sometimes be folded */
return gtFoldExprCompare(tree);
}
}
/* Return the original node (folded/bashed or not) */
return tree;
}
//------------------------------------------------------------------------
// gtFoldExprCall: see if a call is foldable
//
// Arguments:
// call - call to examine
//
// Returns:
// The original call if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// Checks for calls to Type.op_Equality, Type.op_Inequality, and
// Enum.HasFlag, and if the call is to one of these,
// attempts to optimize.
GenTree* Compiler::gtFoldExprCall(GenTreeCall* call)
{
// Can only fold calls to special intrinsics.
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0)
{
return call;
}
// Defer folding if not optimizing.
if (opts.OptimizationDisabled())
{
return call;
}
// Check for a new-style jit intrinsic.
const NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
switch (ni)
{
case NI_System_Enum_HasFlag:
{
GenTree* thisOp = call->gtCallThisArg->GetNode();
GenTree* flagOp = call->gtCallArgs->GetNode();
GenTree* result = gtOptimizeEnumHasFlag(thisOp, flagOp);
if (result != nullptr)
{
return result;
}
break;
}
case NI_System_Type_op_Equality:
case NI_System_Type_op_Inequality:
{
noway_assert(call->TypeGet() == TYP_INT);
GenTree* op1 = call->gtCallArgs->GetNode();
GenTree* op2 = call->gtCallArgs->GetNext()->GetNode();
// If either operand is known to be a RuntimeType, this can be folded
GenTree* result = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2);
if (result != nullptr)
{
return result;
}
break;
}
default:
break;
}
return call;
}
//------------------------------------------------------------------------
// gtFoldTypeEqualityCall: see if a (potential) type equality call is foldable
//
// Arguments:
// isEq -- is it == or != operator
// op1 -- first argument to call
// op2 -- second argument to call
//
// Returns:
// nullptr if no folding happened.
// An alternative tree if folding happens.
//
// Notes:
// If either operand is known to be a RuntimeType, then the type
// equality methods will simply check object identity and so we can
// fold the call into a simple compare of the call's operands.
GenTree* Compiler::gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2)
{
if ((gtGetTypeProducerKind(op1) == TPK_Unknown) && (gtGetTypeProducerKind(op2) == TPK_Unknown))
{
return nullptr;
}
const genTreeOps simpleOp = isEq ? GT_EQ : GT_NE;
JITDUMP("\nFolding call to Type:op_%s to a simple compare via %s\n", isEq ? "Equality" : "Inequality",
GenTree::OpName(simpleOp));
GenTree* compare = gtNewOperNode(simpleOp, TYP_INT, op1, op2);
return compare;
}
/*****************************************************************************
*
* Some comparisons can be folded:
*
* locA == locA
* classVarA == classVarA
* locA + locB == locB + locA
*
*/
GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
assert(tree->OperIsCompare());
/* Filter out cases that cannot be folded here */
/* Do not fold floats or doubles (e.g. NaN != NaN) */
if (varTypeIsFloating(op1->TypeGet()))
{
return tree;
}
// Currently we can only fold when the two subtrees exactly match
// and everything is side effect free.
//
if (((tree->gtFlags & GTF_SIDE_EFFECT) != 0) || !GenTree::Compare(op1, op2, true))
{
// No folding.
//
return tree;
}
// GTF_ORDER_SIDEEFF here may indicate volatile subtrees.
// Or it may indicate a non-null assertion prop into an indir subtree.
//
// Check the operands.
//
if ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0)
{
// If op1 is "volatle" and op2 is not, we can still fold.
//
const bool op1MayBeVolatile = (op1->gtFlags & GTF_ORDER_SIDEEFF) != 0;
const bool op2MayBeVolatile = (op2->gtFlags & GTF_ORDER_SIDEEFF) != 0;
if (!op1MayBeVolatile || op2MayBeVolatile)
{
// No folding.
//
return tree;
}
}
GenTree* cons;
switch (tree->gtOper)
{
case GT_EQ:
case GT_LE:
case GT_GE:
cons = gtNewIconNode(true); /* Folds to GT_CNS_INT(true) */
break;
case GT_NE:
case GT_LT:
case GT_GT:
cons = gtNewIconNode(false); /* Folds to GT_CNS_INT(false) */
break;
default:
assert(!"Unexpected relOp");
return tree;
}
/* The node has been folded into 'cons' */
JITDUMP("\nFolding comparison with identical operands:\n");
DISPTREE(tree);
if (fgGlobalMorph)
{
fgMorphTreeDone(cons);
}
else
{
cons->gtNext = tree->gtNext;
cons->gtPrev = tree->gtPrev;
}
JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
}
//------------------------------------------------------------------------
// gtCreateHandleCompare: generate a type handle comparison
//
// Arguments:
// oper -- comparison operation (equal/not equal)
// op1 -- first operand
// op2 -- second operand
// typeCheckInliningResult -- indicates how the comparison should happen
//
// Returns:
// Type comparison tree
//
GenTree* Compiler::gtCreateHandleCompare(genTreeOps oper,
GenTree* op1,
GenTree* op2,
CorInfoInlineTypeCheck typeCheckInliningResult)
{
// If we can compare pointers directly, just emit the binary operation
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_PASS)
{
return gtNewOperNode(oper, TYP_INT, op1, op2);
}
assert(typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_USE_HELPER);
// Emit a call to a runtime helper
GenTreeCall::Use* helperArgs = gtNewCallArgs(op1, op2);
GenTree* ret = gtNewHelperCallNode(CORINFO_HELP_ARE_TYPES_EQUIVALENT, TYP_INT, helperArgs);
if (oper == GT_EQ)
{
ret = gtNewOperNode(GT_NE, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
else
{
assert(oper == GT_NE);
ret = gtNewOperNode(GT_EQ, TYP_INT, ret, gtNewIconNode(0, TYP_INT));
}
return ret;
}
//------------------------------------------------------------------------
// gtFoldTypeCompare: see if a type comparison can be further simplified
//
// Arguments:
// tree -- tree possibly comparing types
//
// Returns:
// An alternative tree if folding happens.
// Original tree otherwise.
//
// Notes:
// Checks for
// typeof(...) == obj.GetType()
// typeof(...) == typeof(...)
// obj1.GetType() == obj2.GetType()
//
// And potentially optimizes away the need to obtain actual
// RuntimeType objects to do the comparison.
GenTree* Compiler::gtFoldTypeCompare(GenTree* tree)
{
// Only handle EQ and NE
// (maybe relop vs null someday)
const genTreeOps oper = tree->OperGet();
if ((oper != GT_EQ) && (oper != GT_NE))
{
return tree;
}
// Screen for the right kinds of operands
GenTree* const op1 = tree->AsOp()->gtOp1;
const TypeProducerKind op1Kind = gtGetTypeProducerKind(op1);
if (op1Kind == TPK_Unknown)
{
return tree;
}
GenTree* const op2 = tree->AsOp()->gtOp2;
const TypeProducerKind op2Kind = gtGetTypeProducerKind(op2);
if (op2Kind == TPK_Unknown)
{
return tree;
}
// If both types are created via handles, we can simply compare
// handles instead of the types that they'd create.
if ((op1Kind == TPK_Handle) && (op2Kind == TPK_Handle))
{
JITDUMP("Optimizing compare of types-from-handles to instead compare handles\n");
GenTree* op1ClassFromHandle = tree->AsOp()->gtOp1->AsCall()->gtCallArgs->GetNode();
GenTree* op2ClassFromHandle = tree->AsOp()->gtOp2->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE cls1Hnd = NO_CLASS_HANDLE;
CORINFO_CLASS_HANDLE cls2Hnd = NO_CLASS_HANDLE;
// Try and find class handles from op1 and op2
cls1Hnd = gtGetHelperArgClassHandle(op1ClassFromHandle);
cls2Hnd = gtGetHelperArgClassHandle(op2ClassFromHandle);
// If we have both class handles, try and resolve the type equality test completely.
bool resolveFailed = false;
if ((cls1Hnd != NO_CLASS_HANDLE) && (cls2Hnd != NO_CLASS_HANDLE))
{
JITDUMP("Asking runtime to compare %p (%s) and %p (%s) for equality\n", dspPtr(cls1Hnd),
eeGetClassName(cls1Hnd), dspPtr(cls2Hnd), eeGetClassName(cls2Hnd));
TypeCompareState s = info.compCompHnd->compareTypesForEquality(cls1Hnd, cls2Hnd);
if (s != TypeCompareState::May)
{
// Type comparison result is known.
const bool typesAreEqual = (s == TypeCompareState::Must);
const bool operatorIsEQ = (oper == GT_EQ);
const int compareResult = operatorIsEQ ^ typesAreEqual ? 0 : 1;
JITDUMP("Runtime reports comparison is known at jit time: %u\n", compareResult);
GenTree* result = gtNewIconNode(compareResult);
return result;
}
else
{
resolveFailed = true;
}
}
if (resolveFailed)
{
JITDUMP("Runtime reports comparison is NOT known at jit time\n");
}
else
{
JITDUMP("Could not find handle for %s%s\n", (cls1Hnd == NO_CLASS_HANDLE) ? " cls1" : "",
(cls2Hnd == NO_CLASS_HANDLE) ? " cls2" : "");
}
// We can't answer the equality comparison definitively at jit
// time, but can still simplify the comparison.
//
// Find out how we can compare the two handles.
// NOTE: We're potentially passing NO_CLASS_HANDLE, but the runtime knows what to do with it here.
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(cls1Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
// If the first type needs helper, check the other type: it might be okay with a simple compare.
if (inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER)
{
inliningKind = info.compCompHnd->canInlineTypeCheck(cls2Hnd, CORINFO_INLINE_TYPECHECK_SOURCE_TOKEN);
}
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, op1ClassFromHandle, op2ClassFromHandle, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
if ((op1Kind == TPK_GetType) && (op2Kind == TPK_GetType))
{
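// Both operands are obj.GetType() calls (or the equivalent intrinsic); compare the objects'
// method table pointers directly instead of materializing RuntimeType objects.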
GenTree* arg1;
if (op1->OperGet() == GT_INTRINSIC)
{
arg1 = op1->AsUnOp()->gtOp1;
}
else
{
arg1 = op1->AsCall()->gtCallThisArg->GetNode();
}
arg1 = gtNewMethodTableLookup(arg1);
GenTree* arg2;
if (op2->OperGet() == GT_INTRINSIC)
{
arg2 = op2->AsUnOp()->gtOp1;
}
else
{
arg2 = op2->AsCall()->gtCallThisArg->GetNode();
}
arg2 = gtNewMethodTableLookup(arg2);
CorInfoInlineTypeCheck inliningKind =
info.compCompHnd->canInlineTypeCheck(nullptr, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
assert(inliningKind == CORINFO_INLINE_TYPECHECK_PASS || inliningKind == CORINFO_INLINE_TYPECHECK_USE_HELPER);
GenTree* compare = gtCreateHandleCompare(oper, arg1, arg2, inliningKind);
// Drop any now-irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
return compare;
}
// If one operand creates a type from a handle and the other operand is fetching the type from an object,
// we can sometimes optimize the type compare into a simpler
// method table comparison.
//
// TODO: if other operand is null...
if (!(((op1Kind == TPK_GetType) && (op2Kind == TPK_Handle)) ||
((op1Kind == TPK_Handle) && (op2Kind == TPK_GetType))))
{
return tree;
}
GenTree* const opHandle = (op1Kind == TPK_Handle) ? op1 : op2;
GenTree* const opOther = (op1Kind == TPK_Handle) ? op2 : op1;
// Tunnel through the handle operand to get at the class handle involved.
GenTree* const opHandleArgument = opHandle->AsCall()->gtCallArgs->GetNode();
CORINFO_CLASS_HANDLE clsHnd = gtGetHelperArgClassHandle(opHandleArgument);
// If we couldn't find the class handle, give up.
if (clsHnd == NO_CLASS_HANDLE)
{
return tree;
}
// Ask the VM if this type can be equality tested by a simple method
// table comparison.
CorInfoInlineTypeCheck typeCheckInliningResult =
info.compCompHnd->canInlineTypeCheck(clsHnd, CORINFO_INLINE_TYPECHECK_SOURCE_VTABLE);
if (typeCheckInliningResult == CORINFO_INLINE_TYPECHECK_NONE)
{
return tree;
}
// We're good to go.
JITDUMP("Optimizing compare of obj.GetType()"
" and type-from-handle to compare method table pointer\n");
// opHandleArgument is the method table we're looking for.
GenTree* const knownMT = opHandleArgument;
// Fetch object method table from the object itself.
GenTree* objOp = nullptr;
// Note we may see intrinsified or regular calls to GetType
if (opOther->OperGet() == GT_INTRINSIC)
{
objOp = opOther->AsUnOp()->gtOp1;
}
else
{
objOp = opOther->AsCall()->gtCallThisArg->GetNode();
}
bool pIsExact = false;
bool pIsNonNull = false;
CORINFO_CLASS_HANDLE objCls = gtGetClassHandle(objOp, &pIsExact, &pIsNonNull);
// if both classes are "final" (e.g. System.String[]) we can replace the comparison
// with `true/false` + null check.
if ((objCls != NO_CLASS_HANDLE) && (pIsExact || impIsClassExact(objCls)))
{
TypeCompareState tcs = info.compCompHnd->compareTypesForEquality(objCls, clsHnd);
if (tcs != TypeCompareState::May)
{
const bool operatorIsEQ = oper == GT_EQ;
const bool typesAreEqual = tcs == TypeCompareState::Must;
GenTree* compareResult = gtNewIconNode((operatorIsEQ ^ typesAreEqual) ? 0 : 1);
if (!pIsNonNull)
{
// we still have to emit a null-check
// obj.GetType == typeof() -> (nullcheck) true/false
GenTree* nullcheck = gtNewNullCheck(objOp, compCurBB);
return gtNewOperNode(GT_COMMA, tree->TypeGet(), nullcheck, compareResult);
}
else if (objOp->gtFlags & GTF_ALL_EFFECT)
{
return gtNewOperNode(GT_COMMA, tree->TypeGet(), objOp, compareResult);
}
else
{
return compareResult;
}
}
}
// Fetch the method table from the object
GenTree* const objMT = gtNewMethodTableLookup(objOp);
// Compare the two method tables
GenTree* const compare = gtCreateHandleCompare(oper, objMT, knownMT, typeCheckInliningResult);
// Drop any now irrelevant flags
compare->gtFlags |= tree->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// And we're done
return compare;
}
//------------------------------------------------------------------------
// gtGetHelperArgClassHandle: find the compile time class handle from
// a helper call argument tree
//
// Arguments:
// tree - tree that passes the handle to the helper
//
// Returns:
// The compile time class handle if known.
//
CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE result = NO_CLASS_HANDLE;
// Walk through any wrapping nop.
if ((tree->gtOper == GT_NOP) && (tree->gtType == TYP_I_IMPL))
{
tree = tree->AsOp()->gtOp1;
}
// The handle could be a literal constant
if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
}
// Or the result of a runtime lookup
else if (tree->OperGet() == GT_RUNTIMELOOKUP)
{
result = tree->AsRuntimeLookup()->GetClassHandle();
}
// Or something reached indirectly
else if (tree->gtOper == GT_IND)
{
// The handle indirs we are looking for will be marked as non-faulting.
// Certain others (eg from refanytype) may not be.
if (tree->gtFlags & GTF_IND_NONFAULTING)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)handleTreeInternal->AsIntCon()->gtCompileTimeHandle;
}
}
}
return result;
}
//------------------------------------------------------------------------
// gtFoldExprSpecial -- optimize binary ops with one constant operand
//
// Arguments:
// tree - tree to optimize
//
// Return value:
// Tree (possibly modified at root or below), or a new tree
// Any new tree is fully morphed, if necessary.
//
GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->AsOp()->gtOp2;
genTreeOps oper = tree->OperGet();
GenTree* op;
GenTree* cons;
ssize_t val;
assert(tree->OperKind() & GTK_BINOP);
/* Filter out operators that cannot be folded here */
if (oper == GT_CAST)
{
return tree;
}
/* We only consider TYP_INT for folding
* Do not fold pointer arithmetic (e.g. addressing modes!) */
if (oper != GT_QMARK && !varTypeIsIntOrI(tree->gtType))
{
return tree;
}
/* Find out which is the constant node */
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
/* Get the constant value */
val = cons->AsIntConCommon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
// Helper function that creates a new IntCon node and morphs it, if required
auto NewMorphedIntConNode = [&](int value) -> GenTreeIntCon* {
GenTreeIntCon* icon = gtNewIconNode(value);
if (fgGlobalMorph)
{
fgMorphTreeDone(icon);
}
return icon;
};
// Here `op` is the non-constant operand, `cons` is the constant operand
// and `val` is the constant value.
switch (oper)
{
case GT_LE:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 <= x) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_GE:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x >= 0) is always true
op = NewMorphedIntConNode(1);
goto DONE_FOLD;
}
break;
case GT_LT:
if (tree->IsUnsigned() && (val == 0) && (op2 == cons) && !opHasSideEffects)
{
// unsigned (x < 0) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
break;
case GT_GT:
if (tree->IsUnsigned() && (val == 0) && (op1 == cons) && !opHasSideEffects)
{
// unsigned (0 > x) is always false
op = NewMorphedIntConNode(0);
goto DONE_FOLD;
}
FALLTHROUGH;
case GT_EQ:
case GT_NE:
// Optimize boxed value classes; these are always false. This IL is
// generated when a generic value is tested against null:
// <T> ... foo(T x) { ... if ((object)x == null) ...
if ((val == 0) && op->IsBoxedValue())
{
JITDUMP("\nAttempting to optimize BOX(valueType) %s null [%06u]\n", GenTree::OpName(oper),
dspTreeID(tree));
// We don't expect GT_GT with signed compares, and we
// can't predict the result if we do see it, since the
// boxed object addr could have its high bit set.
if ((oper == GT_GT) && !tree->IsUnsigned())
{
JITDUMP(" bailing; unexpected signed compare via GT_GT\n");
}
else
{
// The tree under the box must be side effect free
// since we will drop it if we optimize.
assert(!gtTreeHasSideEffects(op->AsBox()->BoxOp(), GTF_SIDE_EFFECT));
// See if we can optimize away the box and related statements.
GenTree* boxSourceTree = gtTryRemoveBoxUpstreamEffects(op);
bool didOptimize = (boxSourceTree != nullptr);
// If optimization succeeded, remove the box.
if (didOptimize)
{
// Set up the result of the compare.
int compareResult = 0;
if (oper == GT_GT)
{
// GT_GT(null, box) == false
// GT_GT(box, null) == true
compareResult = (op1 == op);
}
else if (oper == GT_EQ)
{
// GT_EQ(box, null) == false
// GT_EQ(null, box) == false
compareResult = 0;
}
else
{
assert(oper == GT_NE);
// GT_NE(box, null) == true
// GT_NE(null, box) == true
compareResult = 1;
}
JITDUMP("\nSuccess: replacing BOX(valueType) %s null with %d\n", GenTree::OpName(oper),
compareResult);
return NewMorphedIntConNode(compareResult);
}
}
}
else
{
return gtFoldBoxNullable(tree);
}
break;
case GT_ADD:
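// Adding zero is an identity (x + 0 == x, 0 + x == x); fold to the non-constant operand.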
if (val == 0)
{
goto DONE_FOLD;
}
break;
case GT_MUL:
if (val == 1)
{
goto DONE_FOLD;
}
else if (val == 0)
{
/* Multiply by zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_DIV:
case GT_UDIV:
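// Dividing by one is an identity (x / 1 == x); only fold when the constant is the divisor.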
if ((op2 == cons) && (val == 1) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_SUB:
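// Subtracting zero is an identity (x - 0 == x); only fold when the constant is the second operand.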
if ((op2 == cons) && (val == 0) && !op1->OperIsConst())
{
goto DONE_FOLD;
}
break;
case GT_AND:
if (val == 0)
{
/* AND with zero - return the 'zero' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
else
{
/* The GTF_BOOLEAN flag is set for nodes that are part
* of a boolean expression, thus all their children
* are known to evaluate to only 0 or 1 */
if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1
* AND with 1 stays the same */
assert(val == 1);
goto DONE_FOLD;
}
}
break;
case GT_OR:
if (val == 0)
{
goto DONE_FOLD;
}
else if (tree->gtFlags & GTF_BOOLEAN)
{
/* The constant value must be 1 - OR with 1 is 1 */
assert(val == 1);
/* OR with one - return the 'one' node, but not if side effects */
if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
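// Shifting or rotating by zero is an identity; shifting or rotating a zero value yields zero
// (the latter only if the shift-amount operand has no side effects).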
if (val == 0)
{
if (op2 == cons)
{
goto DONE_FOLD;
}
else if (!opHasSideEffects)
{
op = cons;
goto DONE_FOLD;
}
}
break;
case GT_QMARK:
{
assert(op1 == cons && op2 == op && op2->gtOper == GT_COLON);
assert(op2->AsOp()->gtOp1 && op2->AsOp()->gtOp2);
assert(val == 0 || val == 1);
if (val)
{
op = op2->AsColon()->ThenNode();
}
else
{
op = op2->AsColon()->ElseNode();
}
// Clear colon flags only if the qmark itself is not conditionally executed
if ((tree->gtFlags & GTF_COLON_COND) == 0)
{
fgWalkTreePre(&op, gtClearColonCond);
}
}
goto DONE_FOLD;
default:
break;
}
/* The node is not foldable */
return tree;
DONE_FOLD:
/* The node has been folded into 'op' */
// If there was an assignment update, we just morphed it into
// a use; update the flags appropriately
if (op->gtOper == GT_LCL_VAR)
{
assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0);
op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF);
}
JITDUMP("\nFolding binary operator with a constant operand:\n");
DISPTREE(tree);
JITDUMP("Transformed into:\n");
DISPTREE(op);
return op;
}
//------------------------------------------------------------------------
// gtFoldBoxNullable -- optimize a boxed nullable feeding a compare to zero
//
// Arguments:
// tree - binop tree to potentially optimize, must be
// GT_GT, GT_EQ, or GT_NE
//
// Return value:
// Tree (possibly modified below the root).
//
GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
{
assert(tree->OperKind() & GTK_BINOP);
assert(tree->OperIs(GT_GT, GT_EQ, GT_NE));
genTreeOps const oper = tree->OperGet();
if ((oper == GT_GT) && !tree->IsUnsigned())
{
return tree;
}
GenTree* const op1 = tree->AsOp()->gtOp1;
GenTree* const op2 = tree->AsOp()->gtOp2;
GenTree* op;
GenTree* cons;
if (op1->IsCnsIntOrI())
{
op = op2;
cons = op1;
}
else if (op2->IsCnsIntOrI())
{
op = op1;
cons = op2;
}
else
{
return tree;
}
ssize_t const val = cons->AsIntConCommon()->IconValue();
if (val != 0)
{
return tree;
}
if (!op->IsCall())
{
return tree;
}
GenTreeCall* const call = op->AsCall();
if (!call->IsHelperCall(this, CORINFO_HELP_BOX_NULLABLE))
{
return tree;
}
JITDUMP("\nAttempting to optimize BOX_NULLABLE(&x) %s null [%06u]\n", GenTree::OpName(oper), dspTreeID(tree));
// Get the address of the struct being boxed
GenTree* const arg = call->gtCallArgs->GetNext()->GetNode();
if (arg->OperIs(GT_ADDR) && ((arg->gtFlags & GTF_LATE_ARG) == 0))
{
CORINFO_CLASS_HANDLE nullableHnd = gtGetStructHandle(arg->AsOp()->gtOp1);
CORINFO_FIELD_HANDLE fieldHnd = info.compCompHnd->getFieldInClass(nullableHnd, 0);
// Replace the box with an access of the nullable 'hasValue' field.
JITDUMP("\nSuccess: replacing BOX_NULLABLE(&x) [%06u] with x.hasValue\n", dspTreeID(op));
GenTree* newOp = gtNewFieldRef(TYP_BOOL, fieldHnd, arg, 0);
if (op == op1)
{
tree->AsOp()->gtOp1 = newOp;
}
else
{
tree->AsOp()->gtOp2 = newOp;
}
cons->gtType = TYP_INT;
}
return tree;
}
//------------------------------------------------------------------------
// gtTryRemoveBoxUpstreamEffects: given an unused value type box,
// try and remove the upstream allocation and unnecessary parts of
// the copy.
//
// Arguments:
// op - the box node to optimize
// options - controls whether and how trees are modified
// (see notes)
//
// Return Value:
// A tree representing the original value to box, if removal
// is successful/possible (but see note). nullptr if removal fails.
//
// Notes:
// Value typed box gets special treatment because it has associated
// side effects that can be removed if the box result is not used.
//
// By default (options == BR_REMOVE_AND_NARROW) this method will
// try and remove unnecessary trees and will try and reduce remaining
// operations to the minimal set, possibly narrowing the width of
// loads from the box source if it is a struct.
//
// To perform a trial removal, pass BR_DONT_REMOVE. This can be
// useful to determine if this optimization should only be
// performed if some other conditions hold true.
//
// To remove but not alter the access to the box source, pass
// BR_REMOVE_BUT_NOT_NARROW.
//
// To remove and return the tree for the type handle used for
// the boxed newobj, pass BR_REMOVE_BUT_NOT_NARROW_WANT_TYPE_HANDLE.
// This can be useful when the only part of the box that is "live"
// is its type.
//
// If removal fails, it is possible that a subsequent pass may be
// able to optimize. Blocking side effects may now be minimized
// (null or bounds checks might have been removed) or might be
// better known (inline return placeholder updated with the actual
// return expression). So the box is perhaps best left as is to
// help trigger this re-examination.
GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions options)
{
assert(op->IsBoxedValue());
// grab related parts for the optimization
GenTreeBox* box = op->AsBox();
Statement* asgStmt = box->gtAsgStmtWhenInlinedBoxValue;
Statement* copyStmt = box->gtCopyStmtWhenInlinedBoxValue;
JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)"
" [%06u] (assign/newobj " FMT_STMT " copy " FMT_STMT "\n",
(options == BR_DONT_REMOVE) ? "checking if it is possible" : "attempting",
(options == BR_MAKE_LOCAL_COPY) ? "make local unboxed version" : "remove side effects", dspTreeID(op),
asgStmt->GetID(), copyStmt->GetID());
// If we don't recognize the form of the assign, bail.
GenTree* asg = asgStmt->GetRootNode();
if (asg->gtOper != GT_ASG)
{
JITDUMP(" bailing; unexpected assignment op %s\n", GenTree::OpName(asg->gtOper));
return nullptr;
}
// If we're eventually going to return the type handle, remember it now.
GenTree* boxTypeHandle = nullptr;
if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE))
{
GenTree* asgSrc = asg->AsOp()->gtOp2;
genTreeOps asgSrcOper = asgSrc->OperGet();
// Allocation may be via AllocObj or via helper call, depending
// on when this is invoked and whether the jit is using AllocObj
// for R2R allocations.
if (asgSrcOper == GT_ALLOCOBJ)
{
GenTreeAllocObj* allocObj = asgSrc->AsAllocObj();
boxTypeHandle = allocObj->AsOp()->gtOp1;
}
else if (asgSrcOper == GT_CALL)
{
GenTreeCall* newobjCall = asgSrc->AsCall();
GenTreeCall::Use* newobjArgs = newobjCall->gtCallArgs;
// In R2R expansions the handle may not be an explicit operand to the helper,
// so we can't remove the box.
if (newobjArgs == nullptr)
{
assert(newobjCall->IsHelperCall(this, CORINFO_HELP_READYTORUN_NEW));
JITDUMP(" bailing; newobj via R2R helper\n");
return nullptr;
}
boxTypeHandle = newobjArgs->GetNode();
}
else
{
unreached();
}
assert(boxTypeHandle != nullptr);
}
// If we don't recognize the form of the copy, bail.
GenTree* copy = copyStmt->GetRootNode();
if (copy->gtOper != GT_ASG)
{
// GT_RET_EXPR is a tolerable temporary failure.
// The jit will revisit this optimization after
// inlining is done.
if (copy->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy %s\n", GenTree::OpName(copy->gtOper));
}
else
{
// Anything else is a missed case we should
// figure out how to handle. One known case
// is GT_COMMAs enclosing the GT_ASG we are
// looking for.
JITDUMP(" bailing; unexpected copy op %s\n", GenTree::OpName(copy->gtOper));
}
return nullptr;
}
// Handle case where we are optimizing the box into a local copy
if (options == BR_MAKE_LOCAL_COPY)
{
// Drill into the box to get at the box temp local and the box type
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
assert(lvaTable[boxTempLcl].lvType == TYP_REF);
CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd;
assert(boxClass != nullptr);
// Verify that the copyDst has the expected shape
// (blk|obj|ind (add (boxTempLcl, ptr-size)))
//
// The shape here is constrained to the patterns we produce
// over in impImportAndPushBox for the inlined box case.
GenTree* copyDst = copy->AsOp()->gtOp1;
if (!copyDst->OperIs(GT_BLK, GT_IND, GT_OBJ))
{
JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper));
return nullptr;
}
GenTree* copyDstAddr = copyDst->AsOp()->gtOp1;
if (copyDstAddr->OperGet() != GT_ADD)
{
JITDUMP("Unexpected copy dest address tree\n");
return nullptr;
}
GenTree* copyDstAddrOp1 = copyDstAddr->AsOp()->gtOp1;
if ((copyDstAddrOp1->OperGet() != GT_LCL_VAR) || (copyDstAddrOp1->AsLclVarCommon()->GetLclNum() != boxTempLcl))
{
JITDUMP("Unexpected copy dest address 1st addend\n");
return nullptr;
}
GenTree* copyDstAddrOp2 = copyDstAddr->AsOp()->gtOp2;
if (!copyDstAddrOp2->IsIntegralConst(TARGET_POINTER_SIZE))
{
JITDUMP("Unexpected copy dest address 2nd addend\n");
return nullptr;
}
// Screening checks have all passed. Do the transformation.
//
// Retype the box temp to be a struct
JITDUMP("Retyping box temp V%02u to struct %s\n", boxTempLcl, eeGetClassName(boxClass));
lvaTable[boxTempLcl].lvType = TYP_UNDEF;
const bool isUnsafeValueClass = false;
lvaSetStruct(boxTempLcl, boxClass, isUnsafeValueClass);
var_types boxTempType = lvaTable[boxTempLcl].lvType;
// Remove the newobj and assignment to box temp
JITDUMP("Bashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Update the copy from the value to be boxed to the box temp
GenTree* newDst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
copyDst->AsOp()->gtOp1 = newDst;
// Return the address of the now-struct typed box temp
GenTree* retValue = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(boxTempLcl, boxTempType));
return retValue;
}
// If the copy is a struct copy, make sure we know how to isolate
// any source side effects.
GenTree* copySrc = copy->AsOp()->gtOp2;
// If the copy source is from a pending inline, wait for it to resolve.
if (copySrc->gtOper == GT_RET_EXPR)
{
JITDUMP(" bailing; must wait for replacement of copy source %s\n", GenTree::OpName(copySrc->gtOper));
return nullptr;
}
bool hasSrcSideEffect = false;
bool isStructCopy = false;
if (gtTreeHasSideEffects(copySrc, GTF_SIDE_EFFECT))
{
hasSrcSideEffect = true;
if (varTypeIsStruct(copySrc->gtType))
{
isStructCopy = true;
if ((copySrc->gtOper != GT_OBJ) && (copySrc->gtOper != GT_IND) && (copySrc->gtOper != GT_FIELD))
{
// We don't know how to handle other cases, yet.
JITDUMP(" bailing; unexpected copy source struct op with side effect %s\n",
GenTree::OpName(copySrc->gtOper));
return nullptr;
}
}
}
// If this was a trial removal, we're done.
if (options == BR_DONT_REMOVE)
{
return copySrc;
}
if (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
// Otherwise, proceed with the optimization.
//
// Change the assignment expression to a NOP.
JITDUMP("\nBashing NEWOBJ [%06u] to NOP\n", dspTreeID(asg));
asg->gtBashToNOP();
// Change the copy expression so it preserves key
// source side effects.
JITDUMP("\nBashing COPY [%06u]", dspTreeID(copy));
if (!hasSrcSideEffect)
{
// If there were no copy source side effects just bash
// the copy to a NOP.
copy->gtBashToNOP();
JITDUMP(" to NOP; no source side effects.\n");
}
else if (!isStructCopy)
{
// For scalar types, go ahead and produce the
// value as the copy is fairly cheap and likely
// the optimizer can trim things down to just the
// minimal side effect parts.
copyStmt->SetRootNode(copySrc);
JITDUMP(" to scalar read via [%06u]\n", dspTreeID(copySrc));
}
else
{
// For struct types read the first byte of the
// source struct; there's no need to read the
// entire thing, and no place to put it.
assert(copySrc->OperIs(GT_OBJ, GT_IND, GT_FIELD));
copyStmt->SetRootNode(copySrc);
if (options == BR_REMOVE_AND_NARROW || options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
JITDUMP(" to read first byte of struct via modified [%06u]\n", dspTreeID(copySrc));
gtChangeOperToNullCheck(copySrc, compCurBB);
}
else
{
JITDUMP(" to read entire struct via modified [%06u]\n", dspTreeID(copySrc));
}
}
if (fgStmtListThreaded)
{
fgSetStmtSeq(asgStmt);
fgSetStmtSeq(copyStmt);
}
// Box effects were successfully optimized.
if (options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE)
{
return boxTypeHandle;
}
else
{
return copySrc;
}
}
//------------------------------------------------------------------------
// gtOptimizeEnumHasFlag: given the operands for a call to Enum.HasFlag,
// try and optimize the call to a simple and/compare tree.
//
// Arguments:
// thisOp - first argument to the call
// flagOp - second argument to the call
//
// Return Value:
// A new cmp/and tree if successful. nullptr on failure.
//
// Notes:
// If successful, may allocate new temps and modify connected
// statements.
GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp)
{
JITDUMP("Considering optimizing call to Enum.HasFlag....\n");
// Operands must be boxes
if (!thisOp->IsBoxedValue() || !flagOp->IsBoxedValue())
{
JITDUMP("bailing, need both inputs to be BOXes\n");
return nullptr;
}
// Operands must have same type
bool isExactThis = false;
bool isNonNullThis = false;
CORINFO_CLASS_HANDLE thisHnd = gtGetClassHandle(thisOp, &isExactThis, &isNonNullThis);
if (thisHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'this' operand\n");
return nullptr;
}
// A boxed thisOp should have exact type and non-null instance
assert(isExactThis);
assert(isNonNullThis);
bool isExactFlag = false;
bool isNonNullFlag = false;
CORINFO_CLASS_HANDLE flagHnd = gtGetClassHandle(flagOp, &isExactFlag, &isNonNullFlag);
if (flagHnd == nullptr)
{
JITDUMP("bailing, can't find type for 'flag' operand\n");
return nullptr;
}
// A boxed flagOp should have exact type and non-null instance
assert(isExactFlag);
assert(isNonNullFlag);
if (flagHnd != thisHnd)
{
JITDUMP("bailing, operand types differ\n");
return nullptr;
}
// If we have a shared type instance we can't safely check type
// equality, so bail.
DWORD classAttribs = info.compCompHnd->getClassAttribs(thisHnd);
if (classAttribs & CORINFO_FLG_SHAREDINST)
{
JITDUMP("bailing, have shared instance type\n");
return nullptr;
}
// Simulate removing the box for thisOP. We need to know that it can
// be safely removed before we can optimize.
GenTree* thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_DONT_REMOVE);
if (thisVal == nullptr)
{
// Note we may fail here if the this operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'this' operand\n");
return nullptr;
}
// Do likewise with flagOp.
GenTree* flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_DONT_REMOVE);
if (flagVal == nullptr)
{
// Note we may fail here if the flag operand comes from
// a call. We should be able to retry this post-inlining.
JITDUMP("bailing, can't undo box of 'flag' operand\n");
return nullptr;
}
// Only proceed when both box sources have the same actual type.
// (this rules out long/int mismatches)
if (genActualType(thisVal->TypeGet()) != genActualType(flagVal->TypeGet()))
{
JITDUMP("bailing, pre-boxed values have different types\n");
return nullptr;
}
// Yes, both boxes can be cleaned up. Optimize.
JITDUMP("Optimizing call to Enum.HasFlag\n");
// Undo the boxing of the Ops and prepare to operate directly
// on the pre-boxed values.
thisVal = gtTryRemoveBoxUpstreamEffects(thisOp, BR_REMOVE_BUT_NOT_NARROW);
flagVal = gtTryRemoveBoxUpstreamEffects(flagOp, BR_REMOVE_BUT_NOT_NARROW);
// Our trial removals above should guarantee successful removals here.
assert(thisVal != nullptr);
assert(flagVal != nullptr);
assert(genActualType(thisVal->TypeGet()) == genActualType(flagVal->TypeGet()));
// Type to use for optimized check
var_types type = genActualType(thisVal->TypeGet());
// The thisVal and flagVal trees come from earlier statements.
//
// Unless they are invariant values, we need to evaluate them both
// to temps at those points to safely transmit the values here.
//
// Also we need to use the flag twice, so we need two trees for it.
GenTree* thisValOpt = nullptr;
GenTree* flagValOpt = nullptr;
GenTree* flagValOptCopy = nullptr;
if (thisVal->IsIntegralConst())
{
thisValOpt = gtClone(thisVal);
assert(thisValOpt != nullptr);
}
else
{
const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp"));
GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal);
Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
thisAsgStmt->SetRootNode(thisAsg);
thisValOpt = gtNewLclvNode(thisTmp, type);
}
if (flagVal->IsIntegralConst())
{
flagValOpt = gtClone(flagVal);
assert(flagValOpt != nullptr);
flagValOptCopy = gtClone(flagVal);
assert(flagValOptCopy != nullptr);
}
else
{
const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp"));
GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal);
Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue;
flagAsgStmt->SetRootNode(flagAsg);
flagValOpt = gtNewLclvNode(flagTmp, type);
flagValOptCopy = gtNewLclvNode(flagTmp, type);
}
// Turn the call into (thisVal & flagVal) == flagVal.
GenTree* andTree = gtNewOperNode(GT_AND, type, thisValOpt, flagValOpt);
GenTree* cmpTree = gtNewOperNode(GT_EQ, TYP_INT, andTree, flagValOptCopy);
JITDUMP("Optimized call to Enum.HasFlag\n");
return cmpTree;
}
/*****************************************************************************
*
* Fold the given constant tree.
*/
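//
// For illustration only (a sketch of typical folds; the trees shown are
// hypothetical): GT_ADD(GT_CNS_INT 2, GT_CNS_INT 3) is bashed in place to
// GT_CNS_INT 5, a relational operator over constants is bashed to GT_CNS_INT
// 0 or 1, and a checked operation that would overflow is morphed into a
// comma-throw (see the INTEGRAL_OVF label below).
//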
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
SSIZE_T i1, i2, itemp;
INT64 lval1, lval2, ltemp;
float f1, f2;
double d1, d2;
var_types switchType;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
if (!opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
return tree;
}
if (tree->OperIs(GT_NOP, GT_ALLOCOBJ, GT_RUNTIMELOOKUP))
{
return tree;
}
// This condition exists to preserve previous behavior.
// TODO-CQ: enable folding for bounds check nodes.
if (tree->OperIs(GT_BOUNDS_CHECK))
{
return tree;
}
#ifdef FEATURE_SIMD
if (tree->OperIs(GT_SIMD))
{
return tree;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (tree->OperIs(GT_HWINTRINSIC))
{
return tree;
}
#endif
if (tree->OperIsUnary())
{
assert(op1->OperIsConst());
switch (op1->TypeGet())
{
case TYP_INT:
// Fold constant INT unary operator.
if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = (INT32)op1->AsIntCon()->IconValue();
// If we fold a unary oper, then the folded constant
// is considered a ConstantIndexField if op1 was one.
if ((op1->AsIntCon()->gtFieldSeq != nullptr) && op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
switch (tree->OperGet())
{
case GT_NOT:
i1 = ~i1;
break;
case GT_NEG:
i1 = -i1;
break;
case GT_BSWAP:
i1 = ((i1 >> 24) & 0xFF) | ((i1 >> 8) & 0xFF00) | ((i1 << 8) & 0xFF0000) |
((i1 << 24) & 0xFF000000);
break;
case GT_BSWAP16:
i1 = ((i1 >> 8) & 0xFF) | ((i1 << 8) & 0xFF00);
break;
case GT_CAST:
// assert (genActualType(tree->CastToType()) == tree->TypeGet());
if (tree->gtOverflow() &&
CheckedOps::CastFromIntOverflows((INT32)i1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(i1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(i1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(i1));
goto CNS_INT;
case TYP_BOOL:
case TYP_UBYTE:
i1 = INT32(UINT8(i1));
goto CNS_INT;
case TYP_UINT:
case TYP_INT:
goto CNS_INT;
case TYP_ULONG:
if (tree->IsUnsigned())
{
lval1 = UINT64(UINT32(i1));
}
else
{
lval1 = UINT64(INT32(i1));
}
goto CNS_LONG;
case TYP_LONG:
if (tree->IsUnsigned())
{
lval1 = INT64(UINT32(i1));
}
else
{
lval1 = INT64(INT32(i1));
}
goto CNS_LONG;
case TYP_FLOAT:
if (tree->IsUnsigned())
{
f1 = forceCastToFloat(UINT32(i1));
}
else
{
f1 = forceCastToFloat(INT32(i1));
}
d1 = f1;
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (tree->IsUnsigned())
{
d1 = (double)UINT32(i1);
}
else
{
d1 = (double)INT32(i1);
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from int");
return tree;
}
default:
return tree;
}
goto CNS_INT;
case TYP_LONG:
// Fold constant LONG unary operator.
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
switch (tree->OperGet())
{
case GT_NOT:
lval1 = ~lval1;
break;
case GT_NEG:
lval1 = -lval1;
break;
case GT_BSWAP:
lval1 = ((lval1 >> 56) & 0xFF) | ((lval1 >> 40) & 0xFF00) | ((lval1 >> 24) & 0xFF0000) |
((lval1 >> 8) & 0xFF000000) | ((lval1 << 8) & 0xFF00000000) |
((lval1 << 24) & 0xFF0000000000) | ((lval1 << 40) & 0xFF000000000000) |
((lval1 << 56) & 0xFF00000000000000);
break;
case GT_CAST:
assert(tree->TypeIs(genActualType(tree->CastToType())));
if (tree->gtOverflow() &&
CheckedOps::CastFromLongOverflows(lval1, tree->CastToType(), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(lval1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(lval1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(lval1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(lval1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(lval1);
goto CNS_INT;
case TYP_UINT:
i1 = UINT32(lval1);
goto CNS_INT;
case TYP_ULONG:
case TYP_LONG:
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->IsUnsigned() && (lval1 < 0))
{
d1 = FloatingPointUtils::convertUInt64ToDouble((unsigned __int64)lval1);
}
else
{
d1 = (double)lval1;
}
if (tree->CastToType() == TYP_FLOAT)
{
f1 = forceCastToFloat(d1); // truncate precision
d1 = f1;
}
goto CNS_DOUBLE;
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from long");
return tree;
}
default:
return tree;
}
goto CNS_LONG;
case TYP_FLOAT:
case TYP_DOUBLE:
assert(op1->OperIs(GT_CNS_DBL));
// Fold constant DOUBLE unary operator.
d1 = op1->AsDblCon()->gtDconVal;
switch (tree->OperGet())
{
case GT_NEG:
d1 = -d1;
break;
case GT_CAST:
f1 = forceCastToFloat(d1);
if ((op1->TypeIs(TYP_DOUBLE) && CheckedOps::CastFromDoubleOverflows(d1, tree->CastToType())) ||
(op1->TypeIs(TYP_FLOAT) && CheckedOps::CastFromFloatOverflows(f1, tree->CastToType())))
{
// The conversion overflows. The ECMA spec says, in III 3.27, that
// "...if overflow occurs converting a floating point type to an integer, ...,
// the value returned is unspecified." However, it would at least be
// desirable to have the same value returned for casting an overflowing
// constant to an int as would be obtained by passing that constant as
// a parameter and then casting that parameter to an int type.
// Don't fold overflowing conversions, as the value returned by
// JIT's codegen doesn't always match with the C compiler's cast result.
// We want the behavior to be the same with or without folding.
return tree;
}
assert(tree->TypeIs(genActualType(tree->CastToType())));
switch (tree->CastToType())
{
case TYP_BYTE:
i1 = INT32(INT8(d1));
goto CNS_INT;
case TYP_SHORT:
i1 = INT32(INT16(d1));
goto CNS_INT;
case TYP_USHORT:
i1 = INT32(UINT16(d1));
goto CNS_INT;
case TYP_UBYTE:
i1 = INT32(UINT8(d1));
goto CNS_INT;
case TYP_INT:
i1 = INT32(d1);
goto CNS_INT;
case TYP_UINT:
i1 = forceCastToUInt32(d1);
goto CNS_INT;
case TYP_LONG:
lval1 = INT64(d1);
goto CNS_LONG;
case TYP_ULONG:
lval1 = FloatingPointUtils::convertDoubleToUInt64(d1);
goto CNS_LONG;
case TYP_FLOAT:
d1 = forceCastToFloat(d1);
goto CNS_DOUBLE;
case TYP_DOUBLE:
if (op1->TypeIs(TYP_FLOAT))
{
d1 = forceCastToFloat(d1); // Truncate precision.
}
goto CNS_DOUBLE; // Redundant cast.
default:
assert(!"Bad CastToType() in gtFoldExprConst() for a cast from double/float");
break;
}
return tree;
default:
return tree;
}
goto CNS_DOUBLE;
default:
// Not a foldable type - e.g. RET const.
return tree;
}
}
// We have a binary operator.
assert(tree->OperIsBinary());
assert(op2 != nullptr);
assert(op1->OperIsConst());
assert(op2->OperIsConst());
if (tree->OperIs(GT_COMMA))
{
return op2;
}
switchType = op1->TypeGet();
// Normally we will just switch on op1 types, but for the case where
// only op2 is a GC type and op1 is not a GC type, we use the op2 type.
// This makes us handle this as a case of folding for GC type.
if (varTypeIsGC(op2->gtType) && !varTypeIsGC(op1->gtType))
{
switchType = op2->TypeGet();
}
switch (switchType)
{
// Fold constant REF or BYREF binary operator.
// These can only be comparisons or null pointers.
case TYP_REF:
// String nodes are an RVA at this point.
if (op1->OperIs(GT_CNS_STR) || op2->OperIs(GT_CNS_STR))
{
// Fold "ldstr" ==/!= null.
if (op2->IsIntegralConst(0))
{
if (tree->OperIs(GT_EQ))
{
i1 = 0;
goto FOLD_COND;
}
if (tree->OperIs(GT_NE) || (tree->OperIs(GT_GT) && tree->IsUnsigned()))
{
i1 = 1;
goto FOLD_COND;
}
}
return tree;
}
FALLTHROUGH;
case TYP_BYREF:
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (i1 == i2);
goto FOLD_COND;
case GT_NE:
i1 = (i1 != i2);
goto FOLD_COND;
case GT_ADD:
noway_assert(!tree->TypeIs(TYP_REF));
// We only fold a GT_ADD that involves a null reference.
if ((op1->TypeIs(TYP_REF) && (i1 == 0)) || (op2->TypeIs(TYP_REF) && (i2 == 0)))
{
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Fold into GT_IND of null byref.
tree->BashToConst(0, TYP_BYREF);
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("\nFolded to null byref:\n");
DISPTREE(tree);
goto DONE;
}
break;
default:
break;
}
return tree;
// Fold constant INT binary operator.
case TYP_INT:
assert(tree->TypeIs(TYP_INT) || varTypeIsGC(tree) || tree->OperIs(GT_MKREFANY));
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
i1 = op1->AsIntConCommon()->IconValue();
i2 = op2->AsIntConCommon()->IconValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (INT32(i1) == INT32(i2));
break;
case GT_NE:
i1 = (INT32(i1) != INT32(i2));
break;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) < UINT32(i2));
}
else
{
i1 = (INT32(i1) < INT32(i2));
}
break;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) <= UINT32(i2));
}
else
{
i1 = (INT32(i1) <= INT32(i2));
}
break;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) >= UINT32(i2));
}
else
{
i1 = (INT32(i1) >= INT32(i2));
}
break;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT32(i1) > UINT32(i2));
}
else
{
i1 = (INT32(i1) > INT32(i2));
}
break;
case GT_ADD:
itemp = i1 + i2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
break;
case GT_SUB:
itemp = i1 - i2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
i1 = itemp;
break;
case GT_MUL:
itemp = i1 * i2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(INT32(i1), INT32(i2), tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
// For the very particular case of the "constant array index" pseudo-field, we
// assume that multiplication is by the field width, and preserves that field.
// This could obviously be made more robust by a more complicated set of annotations...
if ((op1->AsIntCon()->gtFieldSeq != nullptr) &&
op1->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op1->AsIntCon()->gtFieldSeq;
}
else if ((op2->AsIntCon()->gtFieldSeq != nullptr) &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op1->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
fieldSeq = op2->AsIntCon()->gtFieldSeq;
}
i1 = itemp;
break;
case GT_OR:
i1 |= i2;
break;
case GT_XOR:
i1 ^= i2;
break;
case GT_AND:
i1 &= i2;
break;
case GT_LSH:
i1 <<= (i2 & 0x1f);
break;
case GT_RSH:
i1 >>= (i2 & 0x1f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
i1 = UINT32(i1) >> (i2 & 0x1f);
break;
case GT_ROL:
i1 = (i1 << (i2 & 0x1f)) | (UINT32(i1) >> ((32 - i2) & 0x1f));
break;
case GT_ROR:
i1 = (i1 << ((32 - i2) & 0x1f)) | (UINT32(i1) >> (i2 & 0x1f));
break;
// DIV and MOD can throw an exception: if the division is by 0,
// or if there is overflow (dividing INT32_MIN by -1).
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
if (INT32(i2) == 0)
{
// Division by zero.
// We have to evaluate this expression and throw an exception.
return tree;
}
else if ((INT32(i2) == -1) && (UINT32(i1) == 0x80000000))
{
// Overflow Division.
// We have to evaluate this expression and throw an exception.
return tree;
}
if (tree->OperIs(GT_DIV))
{
i1 = INT32(i1) / INT32(i2);
}
else if (tree->OperIs(GT_MOD))
{
i1 = INT32(i1) % INT32(i2);
}
else if (tree->OperIs(GT_UDIV))
{
i1 = UINT32(i1) / UINT32(i2);
}
else
{
assert(tree->OperIs(GT_UMOD));
i1 = UINT32(i1) % UINT32(i2);
}
break;
default:
return tree;
}
// We get here after folding to a GT_CNS_INT type.
// Change the node to the new type/value and make sure the node sizes are OK.
CNS_INT:
FOLD_COND:
JITDUMP("\nFolding operator with constant nodes into a constant:\n");
DISPTREE(tree);
// Also all conditional folding jumps here since the node hanging from
// GT_JTRUE has to be a GT_CNS_INT - value 0 or 1.
// Some operations are performed as 64 bit instead of 32 bit so the upper 32 bits
// need to be discarded. Since constant values are stored as ssize_t and the node
// has TYP_INT the result needs to be sign extended rather than zero extended.
tree->BashToConst(static_cast<int>(i1));
tree->AsIntCon()->gtFieldSeq = fieldSeq;
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to int constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant LONG binary operator.
case TYP_LONG:
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
// op1 is known to be a TYP_LONG, op2 is normally a TYP_LONG, unless we have a shift operator in which case
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
lval1 = op1->AsIntConCommon()->LngValue();
// For the shift operators we can have an op2 that is a TYP_INT.
// Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
lval2 = op2->AsIntConCommon()->IntegralValue();
switch (tree->OperGet())
{
case GT_EQ:
i1 = (lval1 == lval2);
goto FOLD_COND;
case GT_NE:
i1 = (lval1 != lval2);
goto FOLD_COND;
case GT_LT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) < UINT64(lval2));
}
else
{
i1 = (lval1 < lval2);
}
goto FOLD_COND;
case GT_LE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) <= UINT64(lval2));
}
else
{
i1 = (lval1 <= lval2);
}
goto FOLD_COND;
case GT_GE:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) >= UINT64(lval2));
}
else
{
i1 = (lval1 >= lval2);
}
goto FOLD_COND;
case GT_GT:
if (tree->IsUnsigned())
{
i1 = (UINT64(lval1) > UINT64(lval2));
}
else
{
i1 = (lval1 > lval2);
}
goto FOLD_COND;
case GT_ADD:
ltemp = lval1 + lval2;
if (tree->gtOverflow() && CheckedOps::AddOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
#ifdef TARGET_64BIT
fieldSeq = GetFieldSeqStore()->Append(op1->AsIntCon()->gtFieldSeq, op2->AsIntCon()->gtFieldSeq);
#endif
break;
case GT_SUB:
ltemp = lval1 - lval2;
if (tree->gtOverflow() && CheckedOps::SubOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_MUL:
ltemp = lval1 * lval2;
if (tree->gtOverflow() && CheckedOps::MulOverflows(lval1, lval2, tree->IsUnsigned()))
{
goto INTEGRAL_OVF;
}
lval1 = ltemp;
break;
case GT_OR:
lval1 |= lval2;
break;
case GT_XOR:
lval1 ^= lval2;
break;
case GT_AND:
lval1 &= lval2;
break;
case GT_LSH:
lval1 <<= (lval2 & 0x3f);
break;
case GT_RSH:
lval1 >>= (lval2 & 0x3f);
break;
case GT_RSZ:
// logical shift -> make it unsigned to not propagate the sign bit.
lval1 = UINT64(lval1) >> (lval2 & 0x3f);
break;
case GT_ROL:
lval1 = (lval1 << (lval2 & 0x3f)) | (UINT64(lval1) >> ((64 - lval2) & 0x3f));
break;
case GT_ROR:
lval1 = (lval1 << ((64 - lval2) & 0x3f)) | (UINT64(lval1) >> (lval2 & 0x3f));
break;
// Both DIV and IDIV on x86 raise an exception for min_int (and min_long) / -1. So we preserve
// that behavior here.
case GT_DIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 /= lval2;
break;
case GT_MOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 %= lval2;
break;
case GT_UDIV:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) / UINT64(lval2);
break;
case GT_UMOD:
if (lval2 == 0)
{
return tree;
}
if ((UINT64(lval1) == UINT64(0x8000000000000000)) && (lval2 == INT64(-1)))
{
return tree;
}
lval1 = UINT64(lval1) % UINT64(lval2);
break;
default:
return tree;
}
CNS_LONG:
#if !defined(TARGET_64BIT)
if (fieldSeq != FieldSeqStore::NotAField())
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
}
#endif // !defined(TARGET_64BIT)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
goto DONE;
// Fold constant FLOAT or DOUBLE binary operator
case TYP_FLOAT:
case TYP_DOUBLE:
if (tree->gtOverflowEx())
{
return tree;
}
assert(op1->OperIs(GT_CNS_DBL));
d1 = op1->AsDblCon()->gtDconVal;
assert(varTypeIsFloating(op2->TypeGet()));
assert(op2->OperIs(GT_CNS_DBL));
d2 = op2->AsDblCon()->gtDconVal;
// Special case - check if we have NaN operands.
// For ordered comparisons the result is always false - return 0.
// For unordered comparisons (i.e. the GTF_RELOP_NAN_UN flag is set)
// the result is always true - return 1.
if (_isnan(d1) || _isnan(d2))
{
JITDUMP("Double operator(s) is NaN\n");
if (tree->OperIsCompare())
{
if (tree->gtFlags & GTF_RELOP_NAN_UN)
{
// Unordered comparison with NaN always succeeds.
i1 = 1;
goto FOLD_COND;
}
else
{
// Normal comparison with NaN always fails.
i1 = 0;
goto FOLD_COND;
}
}
}
switch (tree->OperGet())
{
case GT_EQ:
i1 = (d1 == d2);
goto FOLD_COND;
case GT_NE:
i1 = (d1 != d2);
goto FOLD_COND;
case GT_LT:
i1 = (d1 < d2);
goto FOLD_COND;
case GT_LE:
i1 = (d1 <= d2);
goto FOLD_COND;
case GT_GE:
i1 = (d1 >= d2);
goto FOLD_COND;
case GT_GT:
i1 = (d1 > d2);
goto FOLD_COND;
// Floating point arithmetic should be done in declared
// precision while doing constant folding. Although TYP_FLOAT
// constants are stored as double constants, they must be converted
// back to float before performing float arithmetic. Here is an example
// where performing the arithmetic in double precision would lead to
// incorrect results.
//
// Example:
// float a = float.MaxValue;
// float b = a*a; This will produce +inf in single precision and 1.1579207543382391e+077 in double
//                precision.
// float c = b/b; This will produce NaN in single precision and 1 in double precision.
case GT_ADD:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 + f2);
}
else
{
d1 += d2;
}
break;
case GT_SUB:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 - f2);
}
else
{
d1 -= d2;
}
break;
case GT_MUL:
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 * f2);
}
else
{
d1 *= d2;
}
break;
case GT_DIV:
// We do not fold division by zero, even for floating point.
// This is because the result will be platform-dependent for an expression like 0d / 0d.
if (d2 == 0)
{
return tree;
}
if (op1->TypeIs(TYP_FLOAT))
{
f1 = forceCastToFloat(d1);
f2 = forceCastToFloat(d2);
d1 = forceCastToFloat(f1 / f2);
}
else
{
d1 /= d2;
}
break;
default:
return tree;
}
CNS_DOUBLE:
JITDUMP("\nFolding fp operator with constant nodes into a fp constant:\n");
DISPTREE(tree);
assert((GenTree::s_gtNodeSizes[GT_CNS_DBL] == TREE_NODE_SZ_SMALL) ||
(tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
tree->BashToConst(d1, tree->TypeGet());
if (vnStore != nullptr)
{
fgValueNumberTreeConst(tree);
}
JITDUMP("Bashed to fp constant:\n");
DISPTREE(tree);
goto DONE;
default:
// Not a foldable type.
return tree;
}
DONE:
// Make sure no side effect flags are set on this constant node.
tree->gtFlags &= ~GTF_ALL_EFFECT;
return tree;
INTEGRAL_OVF:
// This operation is going to cause an overflow exception. Morph into
// an overflow helper. Put a dummy constant value for code generation.
//
// We could remove all subsequent trees in the current basic block,
// unless this node is a child of GT_COLON
//
// NOTE: Since the folded value is not constant we should not change the
// "tree" node - otherwise we confuse the logic that checks if the folding
// was successful - instead use one of the operands, e.g. op1.
// Don't fold overflow operations if we are not in the global morph phase.
// The reason for this is that this optimization replaces a gentree node
// with another new gentree node. Say a GT_CALL(arglist) has one 'arg'
// involving overflow arithmetic. During assertion prop, it is possible
// that the 'arg' could be constant folded and the result could lead to an
// overflow. In such a case 'arg' will get replaced with a GT_COMMA node,
// but fgMorphArgs() - see the logic around "if(lateArgsComputed)" - doesn't
// update the args table. For this reason this optimization is enabled only
// during the global morph phase.
//
// TODO-CQ: Once fgMorphArgs() is fixed this restriction could be removed.
if (!fgGlobalMorph)
{
assert(tree->gtOverflow());
return tree;
}
var_types type = genActualType(tree->TypeGet());
op1 = type == TYP_LONG ? gtNewLconNode(0) : gtNewIconNode(0);
if (vnStore != nullptr)
{
op1->gtVNPair.SetBoth(vnStore->VNZeroForType(type));
}
JITDUMP("\nFolding binary operator with constant nodes into a comma throw:\n");
DISPTREE(tree);
// We will return a GT_COMMA whose op1 is the exception helper call and
// whose op2 is the zero constant expression.
assert(tree->gtOverflow());
assert(tree->OperIs(GT_ADD, GT_SUB, GT_CAST, GT_MUL));
assert(op1 != nullptr);
op2 = op1;
op1 = gtNewHelperCallNode(CORINFO_HELP_OVERFLOW, TYP_VOID, gtNewCallArgs(gtNewIconNode(compCurBB->bbTryIndex)));
// op1 is a call to the JIT helper that throws an Overflow exception.
// Attach the ExcSet for VNF_OverflowExc(Void) to this call.
if (vnStore != nullptr)
{
op1->gtVNPair = vnStore->VNPWithExc(ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid()),
vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc,
vnStore->VNPForVoid())));
}
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), op1, op2);
return tree;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//------------------------------------------------------------------------
// gtNewTempAssign: Create an assignment of the given value to a temp.
//
// Arguments:
// tmp - local number for a compiler temp
// val - value to assign to the temp
// pAfterStmt - statement to insert any additional statements after
// ilOffset - il offset for new statements
// block - block to insert any additional statements in
//
// Return Value:
// Normally a new assignment node.
// However, it may return a nop node if val is simply a reference to the temp.
//
// Notes:
// Self-assignments may be represented via NOPs.
//
// May update the type of the temp, if it was previously unknown.
//
// May set compFloatingPointUsed.
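//
// A minimal usage sketch (hypothetical; the temp, value and debug name are
// made up, and appending the result to a statement is left to the caller):
//
//     unsigned tmp = lvaGrabTemp(true DEBUGARG("example temp"));
//     GenTree* asg = gtNewTempAssign(tmp, gtNewIconNode(42));
//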
GenTree* Compiler::gtNewTempAssign(
unsigned tmp, GenTree* val, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block)
{
// Self-assignment is a nop.
if (val->OperGet() == GT_LCL_VAR && val->AsLclVarCommon()->GetLclNum() == tmp)
{
return gtNewNothingNode();
}
LclVarDsc* varDsc = lvaGetDesc(tmp);
if (varDsc->TypeGet() == TYP_I_IMPL && val->TypeGet() == TYP_BYREF)
{
impBashVarAddrsToI(val);
}
var_types valTyp = val->TypeGet();
if (val->OperGet() == GT_LCL_VAR && lvaTable[val->AsLclVar()->GetLclNum()].lvNormalizeOnLoad())
{
valTyp = lvaGetRealType(val->AsLclVar()->GetLclNum());
val->gtType = valTyp;
}
var_types dstTyp = varDsc->TypeGet();
/* If the variable's lvType is not yet set then set it here */
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
#if FEATURE_SIMD
if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
#endif
}
#ifdef DEBUG
// Make sure the actual types match.
if (genActualType(valTyp) != genActualType(dstTyp))
{
// Plus some other exceptions that are apparently legal:
// 1) TYP_REF or BYREF = TYP_I_IMPL
bool ok = false;
if (varTypeIsGC(dstTyp) && (valTyp == TYP_I_IMPL))
{
ok = true;
}
// 2) TYP_DOUBLE = TYP_FLOAT or TYP_FLOAT = TYP_DOUBLE
else if (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))
{
ok = true;
}
// 3) TYP_BYREF = TYP_REF when object stack allocation is enabled
else if (JitConfig.JitObjectStackAllocation() && (dstTyp == TYP_BYREF) && (valTyp == TYP_REF))
{
ok = true;
}
else if (!varTypeIsGC(dstTyp) && (genTypeSize(valTyp) == genTypeSize(dstTyp)))
{
// We can have assignments that require a change of register file, e.g. for arguments
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
{
// It could come from `ASG(struct, 0)` that was propagated to `RETURN struct(0)`,
// and now it is merging to a struct again.
assert(tmp == genReturnLocal);
ok = true;
}
else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
{
assert(val->IsCall());
ok = true;
}
if (!ok)
{
gtDispTree(val);
assert(!"Incompatible types for gtNewTempAssign");
}
}
#endif
// Added this noway_assert for runtime\issue 44895, to protect against silent bad codegen
//
if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF))
{
noway_assert(!"Incompatible types for gtNewTempAssign");
}
// Floating Point assignments can be created during inlining
// see "Zero init inlinee locals:" in fgInlinePrependStatements
// thus we may need to set compFloatingPointUsed to true here.
//
if (varTypeUsesFloatReg(dstTyp) && (compFloatingPointUsed == false))
{
compFloatingPointUsed = true;
}
/* Create the assignment node */
GenTree* asg;
GenTree* dest = gtNewLclvNode(tmp, dstTyp);
dest->gtFlags |= GTF_VAR_DEF;
// With first-class structs, we should be propagating the class handle on all non-primitive
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are 2 special cases:
// 1. we have lost the classHandle from a FIELD node because the parent struct has overlapping fields;
// the field was transformed into an IND or GT_LCL_FLD;
// 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`, and the `CNS_INT` doesn't have a `structHnd`;
// in these cases, we can use the type of the merge return for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(tmp == genReturnLocal);
valStructHnd = lvaGetStruct(genReturnLocal);
assert(valStructHnd != NO_CLASS_HANDLE);
}
if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
{
asg = gtNewAssignNode(dest, val);
}
else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
// The struct value may be a child of a GT_COMMA due to explicit null checks of indirs/fields.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
if (valStructHnd != NO_CLASS_HANDLE)
{
lvaSetStruct(tmp, valStructHnd, false);
}
else
{
assert(valx->gtOper != GT_OBJ);
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// We may have a scalar type variable assigned a struct value, e.g. a 'genReturnLocal'
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
(typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
if (compRationalIRForm)
{
Rationalizer::RewriteAssignmentIntoStoreLcl(asg->AsOp());
}
return asg;
}
/*****************************************************************************
*
* Create a helper call to access a COM field (iff 'assg' is non-null this is
* an assignment and 'assg' is the new value).
*/
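//
// For illustration only (a hedged sketch; the exact helper and argument list
// depend on pFieldInfo): a get of an instance field accessed via
// CORINFO_FIELD_INSTANCE_HELPER becomes roughly
//
//     CALL helper(objPtr, fieldHnd [, structHnd])
//
// possibly wrapped in a GT_IND or a cast to produce the requested lclTyp.
//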
GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_ACCESS_FLAGS access,
CORINFO_FIELD_INFO* pFieldInfo,
var_types lclTyp,
CORINFO_CLASS_HANDLE structType,
GenTree* assg)
{
assert(pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_ADDR_HELPER ||
pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDR_HELPER);
/* If we can't access it directly, we need to call a helper function */
GenTreeCall::Use* args = nullptr;
var_types helperType = TYP_BYREF;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_SET)
{
assert(assg != nullptr);
// helper needs pointer to struct, not struct itself
if (pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(structType != nullptr);
assg = impGetStructAddr(assg, structType, (unsigned)CHECK_SPILL_ALL, true);
}
else if (lclTyp == TYP_DOUBLE && assg->TypeGet() == TYP_FLOAT)
{
assg = gtNewCastNode(TYP_DOUBLE, assg, false, TYP_DOUBLE);
}
else if (lclTyp == TYP_FLOAT && assg->TypeGet() == TYP_DOUBLE)
{
assg = gtNewCastNode(TYP_FLOAT, assg, false, TYP_FLOAT);
}
args = gtNewCallArgs(assg);
helperType = TYP_VOID;
}
else if (access & CORINFO_ACCESS_GET)
{
helperType = lclTyp;
// The calling convention for the helper does not take into
// account optimization of primitive structs.
if ((pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT) && !varTypeIsStruct(lclTyp))
{
helperType = TYP_STRUCT;
}
}
}
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT || pFieldInfo->helper == CORINFO_HELP_SETFIELDSTRUCT)
{
assert(pFieldInfo->structType != nullptr);
args = gtPrependNewCallArg(gtNewIconEmbClsHndNode(pFieldInfo->structType), args);
}
GenTree* fieldHnd = impTokenToHandle(pResolvedToken);
if (fieldHnd == nullptr)
{ // compDonotInline()
return nullptr;
}
args = gtPrependNewCallArg(fieldHnd, args);
// If it's a static field, we shouldn't have an object node
// If it's an instance field, we have an object node
assert((pFieldInfo->fieldAccessor != CORINFO_FIELD_STATIC_ADDR_HELPER) ^ (objPtr == nullptr));
if (objPtr != nullptr)
{
args = gtPrependNewCallArg(objPtr, args);
}
GenTreeCall* call = gtNewHelperCallNode(pFieldInfo->helper, genActualType(helperType), args);
#if FEATURE_MULTIREG_RET
if (varTypeIsStruct(call))
{
call->InitializeStructReturnType(this, structType, call->GetUnmanagedCallConv());
}
#endif // FEATURE_MULTIREG_RET
GenTree* result = call;
if (pFieldInfo->fieldAccessor == CORINFO_FIELD_INSTANCE_HELPER)
{
if (access & CORINFO_ACCESS_GET)
{
if (pFieldInfo->helper == CORINFO_HELP_GETFIELDSTRUCT)
{
if (!varTypeIsStruct(lclTyp))
{
// get the result as primitive type
result = impGetStructAddr(result, structType, (unsigned)CHECK_SPILL_ALL, true);
result = gtNewOperNode(GT_IND, lclTyp, result);
}
}
else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT))
{
// The helper does not extend the small return types.
result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp);
}
}
}
else
{
// OK, now do the indirection
if (access & CORINFO_ACCESS_GET)
{
if (varTypeIsStruct(lclTyp))
{
result = gtNewObjNode(structType, result);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
}
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF);
}
else if (access & CORINFO_ACCESS_SET)
{
if (varTypeIsStruct(lclTyp))
{
result = impAssignStructPtr(result, assg, structType, (unsigned)CHECK_SPILL_ALL);
}
else
{
result = gtNewOperNode(GT_IND, lclTyp, result);
result->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
result = gtNewAssignNode(result, assg);
}
}
}
return result;
}
/*****************************************************************************
*
* Return true if the given node (excluding its children) contains side effects.
* Note that it does not recurse, and children need to be handled separately.
* It may return false even if the node has GTF_SIDE_EFFECT (because of its children).
*
* Similar to OperMayThrow() (but handles GT_CALLs specially), but considers
* assignments too.
*/
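//
// For illustration only (a sketch based on the checks below): with the default
// GTF_SIDE_EFFECT flags, a GT_ASG node reports a side effect, a call does too
// unless it is a side-effect-free helper whose arguments are also side-effect
// free, and any node that may throw (per OperMayThrow) is likewise reported.
//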
bool Compiler::gtNodeHasSideEffects(GenTree* tree, GenTreeFlags flags)
{
if (flags & GTF_ASG)
{
// TODO-Bug: This only checks for GT_ASG/GT_STORE_DYN_BLK but according to OperRequiresAsgFlag
// there are many more opers that are considered to have an assignment side effect: atomic ops
// (GT_CMPXCHG & co.), GT_MEMORYBARRIER (not classified as an atomic op) and HW intrinsic
// memory stores. Atomic ops have special handling in gtExtractSideEffList but the others
// will simply be dropped if they are ever subject to an "extract side effects" operation.
// It is possible that the reason no bugs have yet been observed in this area is that the
// other nodes are likely to always be tree roots.
if (tree->OperIs(GT_ASG, GT_STORE_DYN_BLK))
{
return true;
}
}
// Are there only GTF_CALL side effects remaining? (and no other side effect kinds)
if (flags & GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* const call = tree->AsCall();
const bool ignoreExceptions = (flags & GTF_EXCEPT) == 0;
const bool ignoreCctors = (flags & GTF_IS_IN_CSE) != 0; // We can CSE helpers that run cctors.
if (!call->HasSideEffects(this, ignoreExceptions, ignoreCctors))
{
// If this call is otherwise side effect free, check its arguments.
for (GenTreeCall::Use& use : call->Args())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// I'm a little worried that args that assign to temps that are late args will look like
// side effects...but better to be conservative for now.
for (GenTreeCall::Use& use : call->LateArgs())
{
if (gtTreeHasSideEffects(use.GetNode(), flags))
{
return true;
}
}
// Otherwise:
return false;
}
// Otherwise the GT_CALL is considered to have side-effects.
return true;
}
}
if (flags & GTF_EXCEPT)
{
if (tree->OperMayThrow(this))
{
return true;
}
}
// Expressions declared as CSE by (e.g.) hoisting code are considered to have relevant side
// effects (if we care about GTF_MAKE_CSE).
if ((flags & GTF_MAKE_CSE) && (tree->gtFlags & GTF_MAKE_CSE))
{
return true;
}
return false;
}
/*****************************************************************************
* Returns true if the expr tree has any side effects.
*/
bool Compiler::gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags /* = GTF_SIDE_EFFECT*/)
{
// These are the side effect flags that we care about for this tree
GenTreeFlags sideEffectFlags = tree->gtFlags & flags;
// Does this tree have any Side-effect flags set that we care about?
if (sideEffectFlags == 0)
{
// no it doesn't..
return false;
}
if (sideEffectFlags == GTF_CALL)
{
if (tree->OperGet() == GT_CALL)
{
// Generally all trees that contain GT_CALL nodes are considered to have side-effects.
//
if (tree->AsCall()->gtCallType == CT_HELPER)
{
// If this node is a helper call we may not care about the side-effects.
// Note that gtNodeHasSideEffects checks the side effects of the helper itself
// as well as the side effects of its arguments.
return gtNodeHasSideEffects(tree, flags);
}
}
else if (tree->OperGet() == GT_INTRINSIC)
{
if (gtNodeHasSideEffects(tree, flags))
{
return true;
}
if (gtNodeHasSideEffects(tree->AsOp()->gtOp1, flags))
{
return true;
}
if ((tree->AsOp()->gtOp2 != nullptr) && gtNodeHasSideEffects(tree->AsOp()->gtOp2, flags))
{
return true;
}
return false;
}
}
return true;
}
GenTree* Compiler::gtBuildCommaList(GenTree* list, GenTree* expr)
{
// 'list' starts off as null,
// and when it is null we haven't started the list yet.
//
if (list != nullptr)
{
// Create a GT_COMMA that appends 'expr' in front of the remaining set of expressions in (*list)
GenTree* result = gtNewOperNode(GT_COMMA, TYP_VOID, expr, list);
// Set the flags in the comma node
result->gtFlags |= (list->gtFlags & GTF_ALL_EFFECT);
result->gtFlags |= (expr->gtFlags & GTF_ALL_EFFECT);
DBEXEC(fgGlobalMorph, result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
// 'list' and 'expr' should have valuenumbers defined for both or for neither one (unless we are remorphing,
// in which case a prior transform involving either node may have discarded or otherwise invalidated the value
// numbers).
assert((list->gtVNPair.BothDefined() == expr->gtVNPair.BothDefined()) || !fgGlobalMorph);
// Set the ValueNumber 'gtVNPair' for the new GT_COMMA node
//
if (list->gtVNPair.BothDefined() && expr->gtVNPair.BothDefined())
{
// The result of a GT_COMMA node is op2, the normal value number is op2vnp
// But we also need to include the union of side effects from op1 and op2.
// we compute this value into exceptions_vnp.
ValueNumPair op1vnp;
ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
ValueNumPair op2vnp;
ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
vnStore->VNPUnpackExc(expr->gtVNPair, &op1vnp, &op1Xvnp);
vnStore->VNPUnpackExc(list->gtVNPair, &op2vnp, &op2Xvnp);
ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet();
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op1Xvnp);
exceptions_vnp = vnStore->VNPExcSetUnion(exceptions_vnp, op2Xvnp);
result->gtVNPair = vnStore->VNPWithExc(op2vnp, exceptions_vnp);
}
return result;
}
else
{
// The 'expr' will start the list of expressions
return expr;
}
}
//------------------------------------------------------------------------
// gtExtractSideEffList: Extracts side effects from the given expression.
//
// Arguments:
// expr - the expression tree to extract side effects from
// pList - pointer to a (possibly null) GT_COMMA list that
// will contain the extracted side effects
// flags - side effect flags to be considered
// ignoreRoot - ignore side effects on the expression root node
//
// Notes:
// Side effects are prepended to the GT_COMMA list such that op1 of
// each comma node holds the side effect tree and op2 points to the
// next comma node. The original side effect execution order is preserved.
//
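//    For illustration only (hypothetical trees): extracting the side effects of
//    ADD(CALL f(), LCL_VAR V01) with the default flags pulls out the call, so a
//    previously null *pList becomes just CALL f(), while a pre-existing list
//    becomes COMMA(CALL f(), <previous list>).
//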
void Compiler::gtExtractSideEffList(GenTree* expr,
GenTree** pList,
GenTreeFlags flags /* = GTF_SIDE_EFFECT*/,
bool ignoreRoot /* = false */)
{
class SideEffectExtractor final : public GenTreeVisitor<SideEffectExtractor>
{
public:
const GenTreeFlags m_flags;
ArrayStack<GenTree*> m_sideEffects;
enum
{
DoPreOrder = true,
UseExecutionOrder = true
};
SideEffectExtractor(Compiler* compiler, GenTreeFlags flags)
: GenTreeVisitor(compiler), m_flags(flags), m_sideEffects(compiler->getAllocator(CMK_SideEffects))
{
}
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* node = *use;
bool treeHasSideEffects = m_compiler->gtTreeHasSideEffects(node, m_flags);
if (treeHasSideEffects)
{
if (m_compiler->gtNodeHasSideEffects(node, m_flags))
{
PushSideEffects(node);
if (node->OperIsBlk() && !node->OperIsStoreBlk())
{
JITDUMP("Replace an unused OBJ/BLK node [%06d] with a NULLCHECK\n", dspTreeID(node));
m_compiler->gtChangeOperToNullCheck(node, m_compiler->compCurBB);
}
return Compiler::WALK_SKIP_SUBTREES;
}
// TODO-Cleanup: These have GTF_ASG set but for some reason gtNodeHasSideEffects ignores
// them. See the related gtNodeHasSideEffects comment as well.
// Also, these nodes must always be preserved, no matter what side effect flags are passed
// in. But then it should never be the case that gtExtractSideEffList gets called without
// specifying GTF_ASG so there doesn't seem to be any reason to be inconsistent with
// gtNodeHasSideEffects and make this check unconditional.
if (node->OperIsAtomicOp())
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
if ((m_flags & GTF_EXCEPT) != 0)
{
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT have to be kept together.
if (node->OperIs(GT_ADDR) && node->gtGetOp1()->OperIsIndir() &&
(node->gtGetOp1()->TypeGet() == TYP_STRUCT))
{
JITDUMP("Keep the GT_ADDR and GT_IND together:\n");
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
}
// Generally all GT_CALL nodes are considered to have side-effects.
// So if we get here it must be a helper call that we decided does
// not have side effects that we needed to keep.
assert(!node->OperIs(GT_CALL) || (node->AsCall()->gtCallType == CT_HELPER));
}
if ((m_flags & GTF_IS_IN_CSE) != 0)
{
// If we're doing CSE then we also need to unmark CSE nodes. This will fail for CSE defs;
// those need to be extracted as if they're side effects.
if (!UnmarkCSE(node))
{
PushSideEffects(node);
return Compiler::WALK_SKIP_SUBTREES;
}
// The existence of CSE defs and uses is not propagated up the tree like side
// effects are. We need to continue visiting the tree as if it has side effects.
treeHasSideEffects = true;
}
return treeHasSideEffects ? Compiler::WALK_CONTINUE : Compiler::WALK_SKIP_SUBTREES;
}
private:
bool UnmarkCSE(GenTree* node)
{
assert(m_compiler->optValnumCSE_phase);
if (m_compiler->optUnmarkCSE(node))
{
// The call to optUnmarkCSE(node) should have cleared any CSE info.
assert(!IS_CSE_INDEX(node->gtCSEnum));
return true;
}
else
{
assert(IS_CSE_DEF(node->gtCSEnum));
#ifdef DEBUG
if (m_compiler->verbose)
{
printf("Preserving the CSE def #%02d at ", GET_CSE_INDEX(node->gtCSEnum));
m_compiler->printTreeID(node);
}
#endif
return false;
}
}
void PushSideEffects(GenTree* node)
{
// The extracted side effect will no longer be an argument, so unmark it.
// This is safe to do because the side effects will be visited in pre-order,
// aborting as soon as any tree is extracted. Thus if an argument for a call
// is being extracted, it is guaranteed that the call itself will not be.
node->gtFlags &= ~GTF_LATE_ARG;
m_sideEffects.Push(node);
}
};
SideEffectExtractor extractor(this, flags);
if (ignoreRoot)
{
for (GenTree* op : expr->Operands())
{
extractor.WalkTree(&op, nullptr);
}
}
else
{
extractor.WalkTree(&expr, nullptr);
}
GenTree* list = *pList;
// The extractor returns side effects in execution order but gtBuildCommaList prepends
// to the comma-based side effect list so we have to build the list in reverse order.
// This is also why the list cannot be built while traversing the tree.
// The number of side effects is usually small (<= 4), less than the ArrayStack's
// built-in size, so memory allocation is avoided.
while (!extractor.m_sideEffects.Empty())
{
list = gtBuildCommaList(list, extractor.m_sideEffects.Pop());
}
*pList = list;
}
/*****************************************************************************
*
* For debugging only - displays a tree node list and makes sure all the
* links are correctly set.
*/
#ifdef DEBUG
void dispNodeList(GenTree* list, bool verbose)
{
GenTree* last = nullptr;
GenTree* next;
if (!list)
{
return;
}
for (;;)
{
next = list->gtNext;
if (verbose)
{
printf("%08X -> %08X -> %08X\n", last, list, next);
}
assert(!last || last->gtNext == list);
assert(next == nullptr || next->gtPrev == list);
if (!next)
{
break;
}
last = list;
list = next;
}
printf(""); // null string means flush
}
#endif
/*****************************************************************************
* Callback to mark the nodes of a qmark-colon subtree that are conditionally
* executed.
*/
/* static */
Compiler::fgWalkResult Compiler::gtMarkColonCond(GenTree** pTree, fgWalkData* data)
{
assert(data->pCallbackData == nullptr);
(*pTree)->gtFlags |= GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
* Callback to clear the conditionally executed flags of nodes that will no
longer be conditionally executed. Note that when we find another colon we must
stop, as the nodes below this one WILL be conditionally executed. This callback
is called when folding a qmark condition (i.e. the condition is constant).
*/
/* static */
Compiler::fgWalkResult Compiler::gtClearColonCond(GenTree** pTree, fgWalkData* data)
{
GenTree* tree = *pTree;
assert(data->pCallbackData == nullptr);
if (tree->OperGet() == GT_COLON)
{
// Nodes below this will be conditionally executed.
return WALK_SKIP_SUBTREES;
}
tree->gtFlags &= ~GTF_COLON_COND;
return WALK_CONTINUE;
}
/*****************************************************************************
*
* Callback used by the tree walker to implement gtFindLink()
*/
static Compiler::fgWalkResult gtFindLinkCB(GenTree** pTree, Compiler::fgWalkData* cbData)
{
Compiler::FindLinkData* data = (Compiler::FindLinkData*)cbData->pCallbackData;
if (*pTree == data->nodeToFind)
{
data->result = pTree;
data->parent = cbData->parent;
return Compiler::WALK_ABORT;
}
return Compiler::WALK_CONTINUE;
}
Compiler::FindLinkData Compiler::gtFindLink(Statement* stmt, GenTree* node)
{
FindLinkData data = {node, nullptr, nullptr};
fgWalkResult result = fgWalkTreePre(stmt->GetRootNodePointer(), gtFindLinkCB, &data);
if (result == WALK_ABORT)
{
assert(data.nodeToFind == *data.result);
return data;
}
else
{
return {node, nullptr, nullptr};
}
}
/*****************************************************************************
*
* Callback that checks if a tree node has oper type GT_CATCH_ARG
*/
static Compiler::fgWalkResult gtFindCatchArg(GenTree** pTree, Compiler::fgWalkData* /* data */)
{
return ((*pTree)->OperGet() == GT_CATCH_ARG) ? Compiler::WALK_ABORT : Compiler::WALK_CONTINUE;
}
/*****************************************************************************/
bool Compiler::gtHasCatchArg(GenTree* tree)
{
if (((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0) && (fgWalkTreePre(&tree, gtFindCatchArg) == WALK_ABORT))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// gtHasCallOnStack:
//
// Arguments:
// parentStack: a context (stack of parent nodes)
//
// Return Value:
// returns true if any of the parent nodes are a GT_CALL
//
// Assumptions:
// We have a stack of parent nodes. This generally requires that
// we are performing a recursive tree walk using struct fgWalkData
//
//------------------------------------------------------------------------
/* static */ bool Compiler::gtHasCallOnStack(GenTreeStack* parentStack)
{
for (int i = 0; i < parentStack->Height(); i++)
{
GenTree* node = parentStack->Top(i);
if (node->OperGet() == GT_CALL)
{
return true;
}
}
return false;
}
//------------------------------------------------------------------------
// gtGetTypeProducerKind: determine if a tree produces a runtime type, and
// if so, how.
//
// Arguments:
// tree - tree to examine
//
// Return Value:
// TypeProducerKind for the tree.
//
// Notes:
// Checks to see if this tree returns a RuntimeType value, and if so,
// how that value is determined.
//
// Currently handles these cases
// 1) The result of Object::GetType
// 2) The result of typeof(...)
// 3) A null reference
// 4) Tree is otherwise known to have type RuntimeType
//
// The null reference case is surprisingly common because operator
// overloading turns the otherwise innocuous
//
// Type t = ....;
// if (t == null)
//
// into a method call.
Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
if (tree->gtOper == GT_CALL)
{
if (tree->AsCall()->gtCallType == CT_HELPER)
{
if (gtIsTypeHandleToRuntimeTypeHelper(tree->AsCall()))
{
return TPK_Handle;
}
}
else if (tree->AsCall()->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
if (lookupNamedIntrinsic(tree->AsCall()->gtCallMethHnd) == NI_System_Object_GetType)
{
return TPK_GetType;
}
}
}
else if ((tree->gtOper == GT_INTRINSIC) && (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Object_GetType))
{
return TPK_GetType;
}
else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
{
return TPK_Null;
}
else
{
bool isExact = false;
bool isNonNull = false;
CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull);
if (clsHnd != NO_CLASS_HANDLE && clsHnd == info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE))
{
return TPK_Other;
}
}
return TPK_Unknown;
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHelper -- see if a call is constructing
// a RuntimeType from a handle
//
// Arguments:
// call - call to examine
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call)
{
return call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) ||
call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL);
}
//------------------------------------------------------------------------
// gtIsTypeHandleToRuntimeTypeHandleHelper -- see if a call is constructing
// a RuntimeTypeHandle from a handle
//
// Arguments:
// call - call to examine
// pHelper - optional pointer to a variable that receives the type of the helper
//
// Return Value:
// True if so
bool Compiler::gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper)
{
CorInfoHelpFunc helper = CORINFO_HELP_UNDEF;
if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
}
else if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL))
{
helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL;
}
if (pHelper != nullptr)
{
*pHelper = helper;
}
return helper != CORINFO_HELP_UNDEF;
}
bool Compiler::gtIsActiveCSE_Candidate(GenTree* tree)
{
return (optValnumCSE_phase && IS_CSE_INDEX(tree->gtCSEnum));
}
/*****************************************************************************/
struct ComplexityStruct
{
unsigned m_numNodes;
unsigned m_nodeLimit;
ComplexityStruct(unsigned nodeLimit) : m_numNodes(0), m_nodeLimit(nodeLimit)
{
}
};
static Compiler::fgWalkResult ComplexityExceedsWalker(GenTree** pTree, Compiler::fgWalkData* data)
{
ComplexityStruct* pComplexity = (ComplexityStruct*)data->pCallbackData;
if (++pComplexity->m_numNodes > pComplexity->m_nodeLimit)
{
return Compiler::WALK_ABORT;
}
else
{
return Compiler::WALK_CONTINUE;
}
}
bool Compiler::gtComplexityExceeds(GenTree** tree, unsigned limit)
{
ComplexityStruct complexity(limit);
if (fgWalkTreePre(tree, &ComplexityExceedsWalker, &complexity) == WALK_ABORT)
{
return true;
}
else
{
return false;
}
}
bool GenTree::IsPhiNode()
{
return (OperGet() == GT_PHI_ARG) || (OperGet() == GT_PHI) || IsPhiDefn();
}
bool GenTree::IsPhiDefn()
{
bool res = ((OperGet() == GT_ASG) && (AsOp()->gtOp2 != nullptr) && (AsOp()->gtOp2->OperGet() == GT_PHI)) ||
((OperGet() == GT_STORE_LCL_VAR) && (AsOp()->gtOp1 != nullptr) && (AsOp()->gtOp1->OperGet() == GT_PHI));
assert(!res || OperGet() == GT_STORE_LCL_VAR || AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
return res;
}
// IsPartialLclFld: Check for a GT_LCL_FLD whose type is a different size than the lclVar.
//
// Arguments:
// comp - the Compiler object.
//
// Return Value:
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR
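//
// For illustration (hypothetical local): a GT_LCL_FLD of type TYP_INT read from
// a local whose exact size is 8 bytes (e.g. a TYP_LONG local) is a partial
// access, so this returns true; a same-sized access returns false.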
bool GenTree::IsPartialLclFld(Compiler* comp)
{
return ((gtOper == GT_LCL_FLD) &&
(comp->lvaTable[this->AsLclVarCommon()->GetLclNum()].lvExactSize != genTypeSize(gtType)));
}
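// DefinesLocal: for illustration only (a hedged sketch of the cases handled
// below): ASG(LCL_VAR V01, ...) reports V01 as an entire definition,
// ASG(LCL_FLD V01, ...) as a partial one, an ASG through GT_IND or a block
// store is resolved via DefinesLocalAddr on the destination address, and a
// call defines a local only through its return buffer argument.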
bool GenTree::DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
GenTreeBlk* blkNode = nullptr;
if (OperIs(GT_ASG))
{
if (AsOp()->gtOp1->IsLocal())
{
GenTreeLclVarCommon* lclVarTree = AsOp()->gtOp1->AsLclVarCommon();
*pLclVarTree = lclVarTree;
if (pIsEntire != nullptr)
{
if (lclVarTree->IsPartialLclFld(comp))
{
*pIsEntire = false;
}
else
{
*pIsEntire = true;
}
}
return true;
}
else if (AsOp()->gtOp1->OperGet() == GT_IND)
{
GenTree* indArg = AsOp()->gtOp1->AsOp()->gtOp1;
return indArg->DefinesLocalAddr(comp, genTypeSize(AsOp()->gtOp1->TypeGet()), pLclVarTree, pIsEntire);
}
else if (AsOp()->gtOp1->OperIsBlk())
{
blkNode = AsOp()->gtOp1->AsBlk();
}
}
else if (OperIs(GT_CALL))
{
GenTree* retBufArg = AsCall()->GetLclRetBufArgNode();
if (retBufArg == nullptr)
{
return false;
}
unsigned size = comp->typGetObjLayout(AsCall()->gtRetClsHnd)->GetSize();
return retBufArg->DefinesLocalAddr(comp, size, pLclVarTree, pIsEntire);
}
else if (OperIsBlk())
{
blkNode = this->AsBlk();
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_STORE_DYN_BLK))
{
GenTree* blockWidth = blkNode->AsStoreDynBlk()->gtDynamicSize;
if (blockWidth->IsCnsIntOrI())
{
assert(blockWidth->AsIntConCommon()->FitsInI32());
width = static_cast<unsigned>(blockWidth->AsIntConCommon()->IconValue());
if (width == 0)
{
return false;
}
}
}
return destAddr->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
// Otherwise...
return false;
}
// Returns true if this GenTree defines a result which is based on the address of a local.
bool GenTree::DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire)
{
if (OperGet() == GT_ADDR || OperGet() == GT_LCL_VAR_ADDR)
{
GenTree* addrArg = this;
if (OperGet() == GT_ADDR)
{
addrArg = AsOp()->gtOp1;
}
if (addrArg->IsLocal() || addrArg->OperIsLocalAddr())
{
GenTreeLclVarCommon* addrArgLcl = addrArg->AsLclVarCommon();
*pLclVarTree = addrArgLcl;
if (pIsEntire != nullptr)
{
unsigned lclOffset = addrArgLcl->GetLclOffs();
if (lclOffset != 0)
{
// We aren't updating the bytes at [0..lclOffset-1] so *pIsEntire should be set to false
*pIsEntire = false;
}
else
{
unsigned lclNum = addrArgLcl->GetLclNum();
unsigned varWidth = comp->lvaLclExactSize(lclNum);
if (comp->lvaTable[lclNum].lvNormalizeOnStore())
{
// It's normalize on store, so use the full storage width -- writing to low bytes won't
// necessarily yield a normalized value.
varWidth = genTypeStSz(var_types(comp->lvaTable[lclNum].lvType)) * sizeof(int);
}
*pIsEntire = (varWidth == width);
}
}
return true;
}
else if (addrArg->OperGet() == GT_IND)
{
// A GT_ADDR of a GT_IND can both be optimized away, recurse using the child of the GT_IND
return addrArg->AsOp()->gtOp1->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If we are just adding a zero then we allow an IsEntire match against width;
// otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp2->DefinesLocalAddr(comp, AsOp()->gtOp1->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
            // If we are just adding a zero, then we allow an IsEntire match against width;
            // otherwise we change width to zero to disallow an IsEntire match.
return AsOp()->gtOp1->DefinesLocalAddr(comp, AsOp()->gtOp2->IsIntegralConst(0) ? width : 0, pLclVarTree,
pIsEntire);
}
}
// Post rationalization we could have GT_IND(GT_LEA(..)) trees.
else if (OperGet() == GT_LEA)
{
        // This method gets invoked during liveness computation and therefore it is critical
        // that we don't miss a 'use' of any local. The logic below assumes that for
        // LEA(base, index, offset), only the base can be a GT_LCL_VAR_ADDR, never the index.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
GenTree* index = AsOp()->gtOp2;
if (index != nullptr)
{
assert(!index->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire));
}
#endif // DEBUG
// base
GenTree* base = AsOp()->gtOp1;
if (base != nullptr)
{
// Lea could have an Indir as its base.
if (base->OperGet() == GT_IND)
{
base = base->AsOp()->gtOp1->gtEffectiveVal(/*commas only*/ true);
}
return base->DefinesLocalAddr(comp, width, pLclVarTree, pIsEntire);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsLocalExpr: Determine if this is a LclVarCommon node and return some
// additional info about it in the two out parameters.
//
// Arguments:
// comp - The Compiler instance
// pLclVarTree - An "out" argument that returns the local tree as a
// LclVarCommon, if it is indeed local.
// pFldSeq - An "out" argument that returns the value numbering field
// sequence for the node, if any.
//
// Return Value:
// Returns true, and sets the out arguments accordingly, if this is
// a LclVarCommon node.
bool GenTree::IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq)
{
if (IsLocal()) // Note that this covers "GT_LCL_FLD."
{
*pLclVarTree = AsLclVarCommon();
if (OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
// If this tree evaluates some sum of a local address and some constants,
// return the node for the local being addressed
GenTreeLclVarCommon* GenTree::IsLocalAddrExpr()
{
if (OperGet() == GT_ADDR)
{
return AsOp()->gtOp1->IsLocal() ? AsOp()->gtOp1->AsLclVarCommon() : nullptr;
}
else if (OperIsLocalAddr())
{
return this->AsLclVarCommon();
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp2->IsLocalAddrExpr();
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
return AsOp()->gtOp1->IsLocalAddrExpr();
}
}
// Otherwise...
return nullptr;
}
//------------------------------------------------------------------------
// IsLocalAddrExpr: finds if "this" is an address of a local var/fld.
//
// Arguments:
// comp - a compiler instance;
// pLclVarTree - [out] sets to the node indicating the local variable if found;
// pFldSeq - [out] sets to the field sequence representing the field, else null;
// pOffset - [out](optional) sets to the sum offset of the lcl/fld if found,
// note it does not include pLclVarTree->GetLclOffs().
//
// Returns:
// Returns true if "this" represents the address of a local, or a field of a local.
//
// Notes:
// It is mostly used for optimizations but assertion propagation depends on it for correctness.
// So if this function does not recognize a def of a LCL_VAR we can have an incorrect optimization.
//
bool GenTree::IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset /* = nullptr */)
{
if (OperGet() == GT_ADDR)
{
assert(!comp->compRationalIRForm);
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->IsLocal()) // Note that this covers "GT_LCL_FLD."
{
FieldSeqNode* zeroOffsetFieldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFieldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(zeroOffsetFieldSeq, *pFldSeq);
}
*pLclVarTree = addrArg->AsLclVarCommon();
if (addrArg->OperGet() == GT_LCL_FLD)
{
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(addrArg->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else
{
return false;
}
}
else if (OperIsLocalAddr())
{
*pLclVarTree = this->AsLclVarCommon();
if (this->OperGet() == GT_LCL_FLD_ADDR)
{
*pFldSeq = comp->GetFieldSeqStore()->Append(this->AsLclFld()->GetFieldSeq(), *pFldSeq);
}
return true;
}
else if (OperGet() == GT_ADD)
{
if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp1->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp2->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
else if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
GenTreeIntCon* cnst = AsOp()->gtOp2->AsIntCon();
if (cnst->gtFieldSeq == nullptr)
{
return false;
}
// Otherwise, prepend this field to whatever we've already accumulated outside in.
*pFldSeq = comp->GetFieldSeqStore()->Append(cnst->gtFieldSeq, *pFldSeq);
if (pOffset != nullptr)
{
*pOffset += cnst->IconValue();
}
return AsOp()->gtOp1->IsLocalAddrExpr(comp, pLclVarTree, pFldSeq, pOffset);
}
}
// Otherwise...
return false;
}
//------------------------------------------------------------------------
// IsImplicitByrefParameterValue: determine if this tree is the entire
// value of a local implicit byref parameter
//
// Arguments:
// compiler -- compiler instance
//
// Return Value:
// GenTreeLclVar node for the local, or nullptr.
//
GenTreeLclVar* GenTree::IsImplicitByrefParameterValue(Compiler* compiler)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
GenTreeLclVar* lcl = nullptr;
if (OperIs(GT_LCL_VAR))
{
lcl = AsLclVar();
}
else if (OperIs(GT_OBJ))
{
GenTree* addr = AsIndir()->Addr();
if (addr->OperIs(GT_LCL_VAR))
{
lcl = addr->AsLclVar();
}
else if (addr->OperIs(GT_ADDR))
{
GenTree* base = addr->AsOp()->gtOp1;
if (base->OperIs(GT_LCL_VAR))
{
lcl = base->AsLclVar();
}
}
}
if ((lcl != nullptr) && compiler->lvaIsImplicitByRefLocal(lcl->GetLclNum()))
{
return lcl;
}
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
return nullptr;
}
//------------------------------------------------------------------------
// IsLclVarUpdateTree: Determine whether this is an assignment tree of the
// form Vn = Vn 'oper' 'otherTree' where Vn is a lclVar
//
// Arguments:
// pOtherTree - An "out" argument in which 'otherTree' will be returned.
// pOper - An "out" argument in which 'oper' will be returned.
//
// Return Value:
// If the tree is of the above form, the lclNum of the variable being
// updated is returned, and 'pOtherTree' and 'pOper' are set.
// Otherwise, returns BAD_VAR_NUM.
//
// Notes:
// 'otherTree' can have any shape.
// We avoid worrying about whether the op is commutative by only considering the
// first operand of the rhs. It is expected that most trees of this form will
// already have the lclVar on the lhs.
// TODO-CQ: Evaluate whether there are missed opportunities due to this, or
// whether gtSetEvalOrder will already have put the lclVar on the lhs in
// the cases of interest.
unsigned GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps* pOper)
{
unsigned lclNum = BAD_VAR_NUM;
if (OperIs(GT_ASG))
{
GenTree* lhs = AsOp()->gtOp1;
GenTree* rhs = AsOp()->gtOp2;
if ((lhs->OperGet() == GT_LCL_VAR) && rhs->OperIsBinary())
{
unsigned lhsLclNum = lhs->AsLclVarCommon()->GetLclNum();
GenTree* rhsOp1 = rhs->AsOp()->gtOp1;
GenTree* rhsOp2 = rhs->AsOp()->gtOp2;
// Some operators, such as HWINTRINSIC, are currently declared as binary but
// may not have two operands. We must check that both operands actually exist.
if ((rhsOp1 != nullptr) && (rhsOp2 != nullptr) && (rhsOp1->OperGet() == GT_LCL_VAR) &&
(rhsOp1->AsLclVarCommon()->GetLclNum() == lhsLclNum))
{
lclNum = lhsLclNum;
*pOtherTree = rhsOp2;
*pOper = rhs->OperGet();
}
}
}
return lclNum;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// canBeContained: check whether this tree node may be a subcomponent of its parent for purposes
// of code generation.
//
// Return Value:
// True if it is possible to contain this node and false otherwise.
//
bool GenTree::canBeContained() const
{
assert(OperIsLIR());
if (IsMultiRegLclVar())
{
return false;
}
if (gtHasReg(nullptr))
{
return false;
}
// It is not possible for nodes that do not produce values or that are not containable values to be contained.
if (!IsValue() || ((DebugOperKind() & DBK_NOCONTAIN) != 0) || (OperIsHWIntrinsic() && !isContainableHWIntrinsic()))
{
return false;
}
return true;
}
#endif // DEBUG
//------------------------------------------------------------------------
// isContained: check whether this tree node is a subcomponent of its parent for codegen purposes
//
// Return Value:
// Returns true if there is no code generated explicitly for this node.
// Essentially, it will be rolled into the code generation for the parent.
//
// Assumptions:
// This method relies upon the value of the GTF_CONTAINED flag.
// Therefore this method is only valid after Lowering.
// Also note that register allocation or other subsequent phases may cause
// nodes to become contained (or not) and therefore this property may change.
//
bool GenTree::isContained() const
{
assert(OperIsLIR());
const bool isMarkedContained = ((gtFlags & GTF_CONTAINED) != 0);
#ifdef DEBUG
if (!canBeContained())
{
assert(!isMarkedContained);
}
    // Compare nodes actually produce a register (the flags reg; we just don't model it)
    // and are a separate instruction from the branch that consumes the result.
    // They can only produce a result if the child is a SIMD equality comparison.
else if (OperIsCompare())
{
assert(isMarkedContained == false);
}
// if it's contained it can't be unused.
if (isMarkedContained)
{
assert(!IsUnusedValue());
}
#endif // DEBUG
return isMarkedContained;
}
// return true if node is contained and an indir
bool GenTree::isContainedIndir() const
{
return OperIsIndir() && isContained();
}
bool GenTree::isIndirAddrMode()
{
return OperIsIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
{
return OperGet() == GT_IND || OperGet() == GT_STOREIND;
}
bool GenTreeIndir::HasBase()
{
return Base() != nullptr;
}
bool GenTreeIndir::HasIndex()
{
return Index() != nullptr;
}
GenTree* GenTreeIndir::Base()
{
GenTree* addr = Addr();
if (isIndirAddrMode())
{
GenTree* result = addr->AsAddrMode()->Base();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return addr; // TODO: why do we return 'addr' here, but we return 'nullptr' in the equivalent Index() case?
}
}
GenTree* GenTreeIndir::Index()
{
if (isIndirAddrMode())
{
GenTree* result = Addr()->AsAddrMode()->Index();
if (result != nullptr)
{
result = result->gtEffectiveVal();
}
return result;
}
else
{
return nullptr;
}
}
unsigned GenTreeIndir::Scale()
{
if (HasIndex())
{
return Addr()->AsAddrMode()->gtScale;
}
else
{
return 1;
}
}
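//------------------------------------------------------------------------
// Offset: Get the offset contributed by this indirection's address.
//
// Return Value:
//    The address mode offset if the address is a contained address mode,
//    the class variable handle (used as an offset) for GT_CLS_VAR_ADDR,
//    the constant value for a contained integer constant address, and
//    zero otherwise.
//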
ssize_t GenTreeIndir::Offset()
{
if (isIndirAddrMode())
{
return Addr()->AsAddrMode()->Offset();
}
else if (Addr()->gtOper == GT_CLS_VAR_ADDR)
{
return static_cast<ssize_t>(reinterpret_cast<intptr_t>(Addr()->AsClsVar()->gtClsVarHnd));
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
return Addr()->AsIntConCommon()->IconValue();
}
else
{
return 0;
}
}
//------------------------------------------------------------------------
// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value needs recording a relocation with the VM?
//
// Arguments:
// comp - Compiler instance
//
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && (gtOper == GT_CNS_INT) && IsIconHandle();
}
//------------------------------------------------------------------------
// ImmedValCanBeFolded: can this immediate value be folded for op?
//
// Arguments:
// comp - Compiler instance
// op - Tree operator
//
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
// (e.g., typeof(T) == typeof(int)).
return !ImmedValNeedsReloc(comp) || (op == GT_EQ) || (op == GT_NE);
}
#ifdef TARGET_AMD64
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means whether an absolute indirect address can
// be encoded as a 32-bit offset relative to IP or zero.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
if (comp->opts.compReloc)
{
        // During Ngen, the JIT is always asked to generate relocatable code.
        // Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
// During Jitting, we are allowed to generate non-relocatable code.
// On Amd64 we can encode an absolute indirect addr as an offset relative to zero or RIP.
        // An absolute indir addr that can fit within 32-bits can be encoded as an offset relative
// to zero. All other absolute indir addr could be attempted to be encoded as RIP relative
// based on reloc hint provided by VM. RIP relative encoding is preferred over relative
// to zero, because the former is one byte smaller than the latter. For this reason
// we check for reloc hint first and then whether addr fits in 32-bits next.
//
// VM starts off with an initial state to allow both data and code address to be encoded as
// pc-relative offsets. Hence JIT will attempt to encode all absolute addresses as pc-relative
// offsets. It is possible while jitting a method, an address could not be encoded as a
// pc-relative offset. In that case VM will note the overflow and will trigger re-jitting
// of the method with reloc hints turned off for all future methods. Second time around
// jitting will succeed since JIT will not attempt to encode data addresses as pc-relative
        // offsets. Note that JIT will always attempt to relocate code addresses (e.g. call addr).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
}
}
// Returns true if this icon value, when encoded as an address, needs recording a relocation with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
        // During Ngen, the JIT is always asked to generate relocatable code.
        // Hence the JIT will try to encode only icon handles as pc-relative offsets.
return IsIconHandle() && (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue()));
}
else
{
return IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue());
}
}
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4-bytes and can be directly encoded in an addr mode.
bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
if (!comp->opts.compEnablePCRelAddr)
{
return false;
}
#endif
return IsCnsIntOrI();
}
// Returns true if this icon value, when encoded as an address, needs recording a relocation with the VM
bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
{
    // If generating relocatable code, icons should be reported for recording relocations.
return comp->opts.compReloc && IsIconHandle();
}
#endif // TARGET_X86
//------------------------------------------------------------------------
// IsFieldAddr: Is "this" a static or class field address?
//
// Recognizes the following patterns:
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pBaseAddr - [out] parameter for "the base address"
// pFldSeq - [out] parameter for the field sequence
//
// Return Value:
// If "this" matches patterns denoted above, and the FldSeq found is "full",
// i. e. starts with a class field or a static field, and includes all the
// struct fields that this tree represents the address of, this method will
// return "true" and set either "pBaseAddr" to some value, which must be used
// by the caller as the key into the "first field map" to obtain the actual
// value for the field. For instance fields, "base address" will be the object
// reference, for statics - the address to which the field offset with the
// field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
*pFldSeq = FieldSeqStore::NotAField();
GenTree* baseAddr = nullptr;
FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
if (OperIs(GT_ADD))
{
// If one operand has a field sequence, the other operand must not have one
// as the order of fields in that case would not be well-defined.
if (AsOp()->gtOp1->IsCnsIntOrI() && AsOp()->gtOp1->IsIconHandle())
{
assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
baseAddr = AsOp()->gtOp1;
}
else
{
return false;
}
assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
else if (IsCnsIntOrI() && IsIconHandle(GTF_ICON_STATIC_HDL))
{
assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
fldSeq = AsIntCon()->gtFieldSeq;
baseAddr = this;
}
else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
{
baseAddr = this;
}
else
{
return false;
}
assert((fldSeq != nullptr) && (baseAddr != nullptr));
if ((fldSeq == FieldSeqStore::NotAField()) || fldSeq->IsPseudoField())
{
return false;
}
// The above screens out obviously invalid cases, but we have more checks to perform. The
// sequence returned from this method *must* start with either a class (NOT struct) field
// or a static field. To avoid the expense of calling "getFieldClass" here, we will instead
// rely on the invariant that TYP_REF base addresses can never appear for struct fields - we
// will effectively treat such cases ("possible" in unsafe code) as undefined behavior.
if (fldSeq->IsStaticField())
{
// For shared statics, we must encode the logical instantiation argument.
if (fldSeq->IsSharedStaticField())
{
*pBaseAddr = baseAddr;
}
*pFldSeq = fldSeq;
return true;
}
if (baseAddr->TypeIs(TYP_REF))
{
assert(!comp->eeIsValueClass(comp->info.compCompHnd->getFieldClass(fldSeq->GetFieldHandle())));
*pBaseAddr = baseAddr;
*pFldSeq = fldSeq;
return true;
}
// This case is reached, for example, if we have a chain of struct fields that are based on
// some pointer. We do not model such cases because we do not model maps for ByrefExposed
// memory, as it does not have the non-aliasing property of GcHeap and reference types.
return false;
}
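//------------------------------------------------------------------------
// gtIsStaticFieldPtrToBoxedStruct: Does the given field node represent a
//    reference to a boxed value (i.e. the node is TYP_REF but the field's
//    declared type is not a reference type)?
//
// Arguments:
//    fieldNodeType - type of the field node
//    fldHnd - handle of the field being accessed
//
// Return Value:
//    True if the node type is TYP_REF and the field type is not; false otherwise.
//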
bool Compiler::gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd)
{
if (fieldNodeType != TYP_REF)
{
return false;
}
noway_assert(fldHnd != nullptr);
CorInfoType cit = info.compCompHnd->getFieldType(fldHnd);
var_types fieldTyp = JITtype2varType(cit);
return fieldTyp != TYP_REF;
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// gtGetSIMDZero: Get a zero value of the appropriate SIMD type.
//
// Arguments:
// var_types - The simdType
// simdBaseJitType - The SIMD base JIT type we need
// simdHandle - The handle for the SIMD type
//
// Return Value:
// A node generating the appropriate Zero, if we are able to discern it,
// otherwise null (note that this shouldn't happen, but callers should
// be tolerant of this case).
GenTree* Compiler::gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle)
{
bool found = false;
bool isHWSIMD = true;
noway_assert(m_simdHandleCache != nullptr);
// First, determine whether this is Vector<T>.
if (simdType == getSIMDVectorType())
{
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
found = (simdHandle == m_simdHandleCache->SIMDFloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
found = (simdHandle == m_simdHandleCache->SIMDDoubleHandle);
break;
case CORINFO_TYPE_INT:
found = (simdHandle == m_simdHandleCache->SIMDIntHandle);
break;
case CORINFO_TYPE_USHORT:
found = (simdHandle == m_simdHandleCache->SIMDUShortHandle);
break;
case CORINFO_TYPE_UBYTE:
found = (simdHandle == m_simdHandleCache->SIMDUByteHandle);
break;
case CORINFO_TYPE_SHORT:
found = (simdHandle == m_simdHandleCache->SIMDShortHandle);
break;
case CORINFO_TYPE_BYTE:
found = (simdHandle == m_simdHandleCache->SIMDByteHandle);
break;
case CORINFO_TYPE_LONG:
found = (simdHandle == m_simdHandleCache->SIMDLongHandle);
break;
case CORINFO_TYPE_UINT:
found = (simdHandle == m_simdHandleCache->SIMDUIntHandle);
break;
case CORINFO_TYPE_ULONG:
found = (simdHandle == m_simdHandleCache->SIMDULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
found = (simdHandle == m_simdHandleCache->SIMDNIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
found = (simdHandle == m_simdHandleCache->SIMDNUIntHandle);
break;
default:
break;
}
if (found)
{
isHWSIMD = false;
}
}
if (!found)
{
// We must still have isHWSIMD set to true, and the only non-HW types left are the fixed types.
switch (simdType)
{
case TYP_SIMD8:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector2Handle)
{
isHWSIMD = false;
}
#if defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector64FloatHandle);
}
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector64IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector64UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector64UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector64ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector64ByteHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector64UIntHandle);
#endif // defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS)
break;
default:
break;
}
break;
case TYP_SIMD12:
assert((simdBaseJitType == CORINFO_TYPE_FLOAT) && (simdHandle == m_simdHandleCache->SIMDVector3Handle));
isHWSIMD = false;
break;
case TYP_SIMD16:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
if (simdHandle == m_simdHandleCache->SIMDVector4Handle)
{
isHWSIMD = false;
}
#if defined(FEATURE_HW_INTRINSICS)
else
{
assert(simdHandle == m_simdHandleCache->Vector128FloatHandle);
}
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector128DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector128IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector128UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector128UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector128ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector128ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector128LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector128UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector128ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector128NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector128NUIntHandle);
break;
#endif // defined(FEATURE_HW_INTRINSICS)
default:
break;
}
break;
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
case TYP_SIMD32:
switch (simdBaseJitType)
{
case CORINFO_TYPE_FLOAT:
assert(simdHandle == m_simdHandleCache->Vector256FloatHandle);
break;
case CORINFO_TYPE_DOUBLE:
assert(simdHandle == m_simdHandleCache->Vector256DoubleHandle);
break;
case CORINFO_TYPE_INT:
assert(simdHandle == m_simdHandleCache->Vector256IntHandle);
break;
case CORINFO_TYPE_USHORT:
assert(simdHandle == m_simdHandleCache->Vector256UShortHandle);
break;
case CORINFO_TYPE_UBYTE:
assert(simdHandle == m_simdHandleCache->Vector256UByteHandle);
break;
case CORINFO_TYPE_SHORT:
assert(simdHandle == m_simdHandleCache->Vector256ShortHandle);
break;
case CORINFO_TYPE_BYTE:
assert(simdHandle == m_simdHandleCache->Vector256ByteHandle);
break;
case CORINFO_TYPE_LONG:
assert(simdHandle == m_simdHandleCache->Vector256LongHandle);
break;
case CORINFO_TYPE_UINT:
assert(simdHandle == m_simdHandleCache->Vector256UIntHandle);
break;
case CORINFO_TYPE_ULONG:
assert(simdHandle == m_simdHandleCache->Vector256ULongHandle);
break;
case CORINFO_TYPE_NATIVEINT:
assert(simdHandle == m_simdHandleCache->Vector256NIntHandle);
break;
case CORINFO_TYPE_NATIVEUINT:
assert(simdHandle == m_simdHandleCache->Vector256NUIntHandle);
break;
default:
break;
}
break;
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
default:
break;
}
}
unsigned size = genTypeSize(simdType);
if (isHWSIMD)
{
#if defined(FEATURE_HW_INTRINSICS)
return gtNewSimdZeroNode(simdType, simdBaseJitType, size, /* isSimdAsHWIntrinsic */ false);
#else
JITDUMP("Coudn't find the matching SIMD type for %s<%s> in gtGetSIMDZero\n", varTypeName(simdType),
varTypeName(JitType2PreciseVarType(simdBaseJitType)));
return nullptr;
#endif // FEATURE_HW_INTRINSICS
}
else
{
return gtNewSIMDVectorZero(simdType, simdBaseJitType, size);
}
}
#endif // FEATURE_SIMD
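//------------------------------------------------------------------------
// gtGetStructHandleIfPresent: Get the struct class handle for a tree, if
//    one can be determined.
//
// Arguments:
//    tree - the tree of interest
//
// Return Value:
//    The struct class handle describing the tree's type, or NO_CLASS_HANDLE
//    if the tree is not of struct type or no handle could be found.
//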
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
tree = tree->gtEffectiveVal();
if (varTypeIsStruct(tree->gtType))
{
switch (tree->gtOper)
{
default:
break;
case GT_MKREFANY:
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_BLK:
structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->AsCall()->gtRetClsHnd;
break;
case GT_RET_EXPR:
structHnd = tree->AsRetExpr()->gtRetClsHnd;
break;
case GT_ARGPLACE:
structHnd = tree->AsArgPlace()->gtArgPlaceClsHnd;
break;
case GT_INDEX:
structHnd = tree->AsIndex()->gtStructElemClass;
break;
case GT_FIELD:
info.compCompHnd->getFieldType(tree->AsField()->gtFldHnd, &structHnd);
break;
case GT_ASG:
structHnd = gtGetStructHandleIfPresent(tree->gtGetOp1());
break;
case GT_LCL_FLD:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
#endif
break;
case GT_LCL_VAR:
{
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
structHnd = lvaGetStruct(lclNum);
break;
}
case GT_RETURN:
structHnd = gtGetStructHandleIfPresent(tree->AsOp()->gtOp1);
break;
case GT_IND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(tree))
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
#ifdef FEATURE_HW_INTRINSICS
if (structHnd == NO_CLASS_HANDLE)
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, CORINFO_TYPE_FLOAT);
}
#endif
}
else
#endif
{
// Attempt to find a handle for this expression.
// We can do this for an array element indirection, or for a field indirection.
ArrayInfo arrInfo;
if (TryGetArrayInfo(tree->AsIndir(), &arrInfo))
{
structHnd = arrInfo.m_elemStructType;
}
else
{
GenTree* addr = tree->AsIndir()->Addr();
FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
else
{
GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
}
if (fieldSeq != nullptr)
{
fieldSeq = fieldSeq->GetTail();
if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
{
// Note we may have a primitive here (and correctly fail to obtain the handle)
eeGetFieldType(fieldSeq->GetFieldHandle(), &structHnd);
}
}
}
}
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsSIMD()->GetSimdBaseJitType());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
if ((tree->gtFlags & GTF_SIMDASHW_OP) != 0)
{
structHnd = gtGetStructHandleForSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
else
{
structHnd = gtGetStructHandleForHWSIMD(tree->gtType, tree->AsHWIntrinsic()->GetSimdBaseJitType());
}
break;
#endif
break;
}
// TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`,
// nowadays it won't work because the right part of an ASG could have struct type without a handle
// (check `fgMorphBlockOperand(isBlkReqd`) and a few other cases.
}
return structHnd;
}
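//------------------------------------------------------------------------
// gtGetStructHandle: Get the struct class handle for a tree, asserting that
//    one is available.
//
// Arguments:
//    tree - the tree of interest
//
// Return Value:
//    The struct class handle; never NO_CLASS_HANDLE.
//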
CORINFO_CLASS_HANDLE Compiler::gtGetStructHandle(GenTree* tree)
{
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(tree);
assert(structHnd != NO_CLASS_HANDLE);
return structHnd;
}
//------------------------------------------------------------------------
// gtGetClassHandle: find class handle for a ref type
//
// Arguments:
// tree -- tree to find handle for
// pIsExact [out] -- whether handle is exact type
// pIsNonNull [out] -- whether tree value is known not to be null
//
// Return Value:
// nullptr if class handle is unknown,
// otherwise the class handle.
// *pIsExact set true if tree type is known to be exactly the handle type,
// otherwise actual type may be a subtype.
// *pIsNonNull set true if tree value is known not to be null,
// otherwise a null value is possible.
CORINFO_CLASS_HANDLE Compiler::gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull)
{
// Set default values for our out params.
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
// Bail out if we're just importing and not generating code, since
// the jit uses TYP_REF for CORINFO_TYPE_VAR locals and args, but
// these may not be ref types.
if (compIsForImportOnly())
{
return objClass;
}
// Bail out if the tree is not a ref type.
var_types treeType = tree->TypeGet();
if (treeType != TYP_REF)
{
return objClass;
}
// Tunnel through commas.
GenTree* obj = tree->gtEffectiveVal(false);
const genTreeOps objOp = obj->OperGet();
switch (objOp)
{
case GT_COMMA:
{
// gtEffectiveVal above means we shouldn't see commas here.
assert(!"unexpected GT_COMMA");
break;
}
case GT_LCL_VAR:
{
// For locals, pick up type info from the local table.
const unsigned objLcl = obj->AsLclVar()->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
break;
}
case GT_FIELD:
{
// For fields, get the type from the field handle.
CORINFO_FIELD_HANDLE fieldHnd = obj->AsField()->gtFldHnd;
if (fieldHnd != nullptr)
{
objClass = gtGetFieldClassHandle(fieldHnd, pIsExact, pIsNonNull);
}
break;
}
case GT_RET_EXPR:
{
// If we see a RET_EXPR, recurse through to examine the
// return value expression.
GenTree* retExpr = tree->AsRetExpr()->gtInlineCandidate;
objClass = gtGetClassHandle(retExpr, pIsExact, pIsNonNull);
break;
}
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
NamedIntrinsic ni = lookupNamedIntrinsic(call->gtCallMethHnd);
if ((ni == NI_System_Array_Clone) || (ni == NI_System_Object_MemberwiseClone))
{
objClass = gtGetClassHandle(call->gtCallThisArg->GetNode(), pIsExact, pIsNonNull);
break;
}
CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(call->gtCallMethHnd);
if (specialObjClass != nullptr)
{
objClass = specialObjClass;
*pIsExact = true;
*pIsNonNull = true;
break;
}
}
if (call->IsInlineCandidate())
{
// For inline candidates, we've already cached the return
// type class handle in the inline info.
InlineCandidateInfo* inlInfo = call->gtInlineCandidateInfo;
assert(inlInfo != nullptr);
// Grab it as our first cut at a return type.
assert(inlInfo->methInfo.args.retType == CORINFO_TYPE_CLASS);
objClass = inlInfo->methInfo.args.retTypeClass;
// If the method is shared, the above may not capture
// the most precise return type information (that is,
// it may represent a shared return type and as such,
// have instances of __Canon). See if we can use the
// context to get at something more definite.
//
// For now, we do this here on demand rather than when
// processing the call, but we could/should apply
// similar sharpening to the argument and local types
// of the inlinee.
const unsigned retClassFlags = info.compCompHnd->getClassAttribs(objClass);
if (retClassFlags & CORINFO_FLG_SHAREDINST)
{
CORINFO_CONTEXT_HANDLE context = inlInfo->exactContextHnd;
if (context != nullptr)
{
CORINFO_CLASS_HANDLE exactClass = eeGetClassFromContext(context);
// Grab the signature in this context.
CORINFO_SIG_INFO sig;
eeGetMethodSig(call->gtCallMethHnd, &sig, exactClass);
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
}
else if (call->gtCallType == CT_USER_FUNC)
{
// For user calls, we can fetch the approximate return
// type info from the method handle. Unfortunately
// we've lost the exact context, so this is the best
// we can do for now.
CORINFO_METHOD_HANDLE method = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE exactClass = nullptr;
CORINFO_SIG_INFO sig;
eeGetMethodSig(method, &sig, exactClass);
if (sig.retType == CORINFO_TYPE_VOID)
{
// This is a constructor call.
const unsigned methodFlags = info.compCompHnd->getMethodAttribs(method);
assert((methodFlags & CORINFO_FLG_CONSTRUCTOR) != 0);
objClass = info.compCompHnd->getMethodClass(method);
*pIsExact = true;
*pIsNonNull = true;
}
else
{
assert(sig.retType == CORINFO_TYPE_CLASS);
objClass = sig.retTypeClass;
}
}
else if (call->gtCallType == CT_HELPER)
{
objClass = gtGetHelperCallClassHandle(call, pIsExact, pIsNonNull);
}
break;
}
case GT_INTRINSIC:
{
GenTreeIntrinsic* intrinsic = obj->AsIntrinsic();
if (intrinsic->gtIntrinsicName == NI_System_Object_GetType)
{
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsExact = false;
*pIsNonNull = true;
}
break;
}
case GT_CNS_STR:
{
// For literal strings, we know the class and that the
// value is not null.
objClass = impGetStringClass();
*pIsExact = true;
*pIsNonNull = true;
break;
}
case GT_IND:
{
GenTreeIndir* indir = obj->AsIndir();
if (indir->HasBase() && !indir->HasIndex())
{
// indir(addr(lcl)) --> lcl
//
// This comes up during constrained callvirt on ref types.
GenTree* base = indir->Base();
GenTreeLclVarCommon* lcl = base->IsLocalAddrExpr();
if ((lcl != nullptr) && (base->OperGet() != GT_ADD))
{
const unsigned objLcl = lcl->GetLclNum();
objClass = lvaTable[objLcl].lvClassHnd;
*pIsExact = lvaTable[objLcl].lvClassIsExact;
}
else if (base->OperGet() == GT_ARR_ELEM)
{
// indir(arr_elem(...)) -> array element type
GenTree* array = base->AsArrElem()->gtArrObj;
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
}
else if (base->OperGet() == GT_ADD)
{
// TODO-VNTypes: use "IsFieldAddr" here instead.
// This could be a static field access.
//
// See if op1 is a static field base helper call
// and if so, op2 will have the field info.
GenTree* op1 = base->AsOp()->gtOp1;
GenTree* op2 = base->AsOp()->gtOp2;
const bool op1IsStaticFieldBase = gtIsStaticGCBaseHelperCall(op1);
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
if (fieldSeq != nullptr)
{
fieldSeq = fieldSeq->GetTail();
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->GetFieldHandle();
CORINFO_CLASS_HANDLE fieldClass = NO_CLASS_HANDLE;
var_types fieldType = eeGetFieldType(fieldHnd, &fieldClass);
assert(fieldType == TYP_REF);
objClass = fieldClass;
}
}
}
}
break;
}
case GT_BOX:
{
// Box should just wrap a local var reference which has
// the type we're looking for. Also box only represents a
// non-nullable value type so result cannot be null.
GenTreeBox* box = obj->AsBox();
GenTree* boxTemp = box->BoxOp();
assert(boxTemp->IsLocal());
const unsigned boxTempLcl = boxTemp->AsLclVar()->GetLclNum();
objClass = lvaTable[boxTempLcl].lvClassHnd;
*pIsExact = lvaTable[boxTempLcl].lvClassIsExact;
*pIsNonNull = true;
break;
}
case GT_INDEX:
{
GenTree* array = obj->AsIndex()->Arr();
objClass = gtGetArrayElementClassHandle(array);
*pIsExact = false;
*pIsNonNull = false;
break;
}
default:
{
break;
}
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetHelperCallClassHandle: find class handle for return value of a
// helper call
//
// Arguments:
// call - helper call to examine
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if return value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull)
{
assert(call->gtCallType == CT_HELPER);
*pIsNonNull = false;
*pIsExact = false;
CORINFO_CLASS_HANDLE objClass = nullptr;
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL:
{
// Note for some runtimes these helpers return exact types.
//
// But in those cases the types are also sealed, so there's no
// need to claim exactness here.
const bool helperResultNonNull = (helper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE);
CORINFO_CLASS_HANDLE runtimeType = info.compCompHnd->getBuiltinClass(CLASSID_RUNTIME_TYPE);
assert(runtimeType != NO_CLASS_HANDLE);
objClass = runtimeType;
*pIsNonNull = helperResultNonNull;
break;
}
case CORINFO_HELP_CHKCASTCLASS:
case CORINFO_HELP_CHKCASTANY:
case CORINFO_HELP_CHKCASTARRAY:
case CORINFO_HELP_CHKCASTINTERFACE:
case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
case CORINFO_HELP_ISINSTANCEOFINTERFACE:
case CORINFO_HELP_ISINSTANCEOFARRAY:
case CORINFO_HELP_ISINSTANCEOFCLASS:
case CORINFO_HELP_ISINSTANCEOFANY:
{
// Fetch the class handle from the helper call arglist
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* typeArg = args->GetNode();
CORINFO_CLASS_HANDLE castHnd = gtGetHelperArgClassHandle(typeArg);
// We generally assume the type being cast to is the best type
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
// this might not be the best assumption. We could also
// explore calling something like mergeClasses to identify
// the more specific class. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
if (castHnd != nullptr)
{
DWORD attrs = info.compCompHnd->getClassAttribs(castHnd);
if ((attrs & CORINFO_FLG_INTERFACE) != 0)
{
castHnd = nullptr;
}
}
// If we don't have a good estimate for the type we can use the
// type from the value being cast instead.
if (castHnd == nullptr)
{
GenTree* valueArg = args->GetNext()->GetNode();
castHnd = gtGetClassHandle(valueArg, pIsExact, pIsNonNull);
}
// We don't know at jit time if the cast will succeed or fail, but if it
// fails at runtime then an exception is thrown for cast helpers, or the
// result is set null for instance helpers.
//
// So it safe to claim the result has the cast type.
// Note we don't know for sure that it is exactly this type.
if (castHnd != nullptr)
{
objClass = castHnd;
}
break;
}
case CORINFO_HELP_NEWARR_1_DIRECT:
case CORINFO_HELP_NEWARR_1_OBJ:
case CORINFO_HELP_NEWARR_1_VC:
case CORINFO_HELP_NEWARR_1_ALIGN8:
case CORINFO_HELP_READYTORUN_NEWARR_1:
{
CORINFO_CLASS_HANDLE arrayHnd = (CORINFO_CLASS_HANDLE)call->compileTimeHelperArgumentHandle;
if (arrayHnd != NO_CLASS_HANDLE)
{
objClass = arrayHnd;
*pIsExact = true;
*pIsNonNull = true;
}
break;
}
default:
break;
}
return objClass;
}
//------------------------------------------------------------------------
// gtGetArrayElementClassHandle: find class handle for elements of an array
// of ref types
//
// Arguments:
// array -- array to find handle for
//
// Return Value:
// nullptr if element class handle is unknown, otherwise the class handle.
CORINFO_CLASS_HANDLE Compiler::gtGetArrayElementClassHandle(GenTree* array)
{
bool isArrayExact = false;
bool isArrayNonNull = false;
CORINFO_CLASS_HANDLE arrayClassHnd = gtGetClassHandle(array, &isArrayExact, &isArrayNonNull);
if (arrayClassHnd != nullptr)
{
// We know the class of the reference
DWORD attribs = info.compCompHnd->getClassAttribs(arrayClassHnd);
if ((attribs & CORINFO_FLG_ARRAY) != 0)
{
// We know for sure it is an array
CORINFO_CLASS_HANDLE elemClassHnd = nullptr;
CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayClassHnd, &elemClassHnd);
if (arrayElemType == CORINFO_TYPE_CLASS)
{
// We know it is an array of ref types
return elemClassHnd;
}
}
}
return nullptr;
}
//------------------------------------------------------------------------
// gtGetFieldClassHandle: find class handle for a field
//
// Arguments:
// fieldHnd - field handle for field in question
// pIsExact - [OUT] true if type is known exactly
// pIsNonNull - [OUT] true if field value is not null
//
// Return Value:
// nullptr if helper call result is not a ref class, or the class handle
// is unknown, otherwise the class handle.
//
// May examine runtime state of static field instances.
CORINFO_CLASS_HANDLE Compiler::gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull)
{
CORINFO_CLASS_HANDLE fieldClass = nullptr;
CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &fieldClass);
if (fieldCorType == CORINFO_TYPE_CLASS)
{
// Optionally, look at the actual type of the field's value
bool queryForCurrentClass = true;
INDEBUG(queryForCurrentClass = (JitConfig.JitQueryCurrentStaticFieldClass() > 0););
if (queryForCurrentClass)
{
#if DEBUG
const char* fieldClassName = nullptr;
const char* fieldName = eeGetFieldName(fieldHnd, &fieldClassName);
JITDUMP("Querying runtime about current class of field %s.%s (declared as %s)\n", fieldClassName, fieldName,
eeGetClassName(fieldClass));
#endif // DEBUG
// Is this a fully initialized init-only static field?
//
// Note we're not asking for speculative results here, yet.
CORINFO_CLASS_HANDLE currentClass = info.compCompHnd->getStaticFieldCurrentClass(fieldHnd);
if (currentClass != NO_CLASS_HANDLE)
{
// Yes! We know the class exactly and can rely on this to always be true.
fieldClass = currentClass;
*pIsExact = true;
*pIsNonNull = true;
JITDUMP("Runtime reports field is init-only and initialized and has class %s\n",
eeGetClassName(fieldClass));
}
else
{
JITDUMP("Field's current class not available\n");
}
}
}
return fieldClass;
}
//------------------------------------------------------------------------
// gtIsStaticGCBaseHelperCall: true if tree is fetching the gc static base
// for a subsequent static field access
//
// Arguments:
// tree - tree to consider
//
// Return Value:
// true if the tree is a suitable helper call
//
// Notes:
// Excludes R2R helpers as they specify the target field in a way
// that is opaque to the jit.
bool Compiler::gtIsStaticGCBaseHelperCall(GenTree* tree)
{
if (tree->OperGet() != GT_CALL)
{
return false;
}
GenTreeCall* call = tree->AsCall();
if (call->gtCallType != CT_HELPER)
{
return false;
}
const CorInfoHelpFunc helper = eeGetHelperNum(call->gtCallMethHnd);
switch (helper)
{
// We are looking for a REF type so only need to check for the GC base helpers
case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
return true;
default:
break;
}
return false;
}
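//------------------------------------------------------------------------
// ParseArrayAddress: Decompose an array element address computed by "this"
//    into its constituent parts.
//
// Arguments:
//    comp - the Compiler instance
//    arrayInfo - info about the array element type and layout
//    pArr - [out] the array object reference, or null if none was found
//    pInxVN - [out] the value number of the element index
//    pFldSeq - [out] the field sequence for struct fields within the element
//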
void GenTree::ParseArrayAddress(
Compiler* comp, ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq)
{
*pArr = nullptr;
ValueNum inxVN = ValueNumStore::NoVN;
target_ssize_t offset = 0;
FieldSeqNode* fldSeq = nullptr;
ParseArrayAddressWork(comp, 1, pArr, &inxVN, &offset, &fldSeq);
// If we didn't find an array reference (perhaps it is the constant null?) we will give up.
if (*pArr == nullptr)
{
return;
}
    // OK, now we have to figure out if any part of the "offset" is a constant contribution to the index.
// First, sum the offsets of any fields in fldSeq.
unsigned fieldOffsets = 0;
FieldSeqNode* fldSeqIter = fldSeq;
// Also, find the first non-pseudo field...
assert(*pFldSeq == nullptr);
while (fldSeqIter != nullptr)
{
if (fldSeqIter == FieldSeqStore::NotAField())
{
// TODO-Review: A NotAField here indicates a failure to properly maintain the field sequence
// See test case self_host_tests_x86\jit\regression\CLR-x86-JIT\v1-m12-beta2\ b70992\ b70992.exe
// Safest thing to do here is to drop back to MinOpts
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (comp->opts.optRepeat)
{
// We don't guarantee preserving these annotations through the entire optimizer, so
// just conservatively return null if under optRepeat.
*pArr = nullptr;
return;
}
#endif // DEBUG
noway_assert(!"fldSeqIter is NotAField() in ParseArrayAddress");
}
if (!FieldSeqStore::IsPseudoField(fldSeqIter->GetFieldHandleValue()))
{
if (*pFldSeq == nullptr)
{
*pFldSeq = fldSeqIter;
}
CORINFO_CLASS_HANDLE fldCls = nullptr;
noway_assert(fldSeqIter->GetFieldHandle() != NO_FIELD_HANDLE);
CorInfoType cit = comp->info.compCompHnd->getFieldType(fldSeqIter->GetFieldHandle(), &fldCls);
fieldOffsets += comp->compGetTypeSize(cit, fldCls);
}
fldSeqIter = fldSeqIter->GetNext();
}
// Is there some portion of the "offset" beyond the first-elem offset and the struct field suffix we just computed?
if (!FitsIn<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset) ||
!FitsIn<target_ssize_t>(arrayInfo->m_elemSize))
{
// This seems unlikely, but no harm in being safe...
*pInxVN = comp->GetValueNumStore()->VNForExpr(nullptr, TYP_INT);
return;
}
// Otherwise...
target_ssize_t offsetAccountedFor = static_cast<target_ssize_t>(fieldOffsets + arrayInfo->m_elemOffset);
target_ssize_t elemSize = static_cast<target_ssize_t>(arrayInfo->m_elemSize);
target_ssize_t constIndOffset = offset - offsetAccountedFor;
// This should be divisible by the element size...
assert((constIndOffset % elemSize) == 0);
target_ssize_t constInd = constIndOffset / elemSize;
ValueNumStore* vnStore = comp->GetValueNumStore();
if (inxVN == ValueNumStore::NoVN)
{
// Must be a constant index.
*pInxVN = vnStore->VNForPtrSizeIntCon(constInd);
}
else
{
//
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
//
// The value associated with the index value number (inxVN) is the offset into the array,
// which has been scaled by element size. We need to recover the array index from that offset
if (vnStore->IsVNConstant(inxVN))
{
target_ssize_t index = vnStore->CoercedConstantValue<target_ssize_t>(inxVN);
noway_assert(elemSize > 0 && ((index % elemSize) == 0));
*pInxVN = vnStore->VNForPtrSizeIntCon((index / elemSize) + constInd);
}
else
{
bool canFoldDiv = false;
// If the index VN is a MUL by elemSize, see if we can eliminate it instead of adding
// the division by elemSize.
VNFuncApp funcApp;
if (vnStore->GetVNFunc(inxVN, &funcApp) && funcApp.m_func == (VNFunc)GT_MUL)
{
ValueNum vnForElemSize = vnStore->VNForLongCon(elemSize);
                // One of the multiply operands is elemSize, so the resulting
                // index VN should simply be the other operand.
if (funcApp.m_args[1] == vnForElemSize)
{
*pInxVN = funcApp.m_args[0];
canFoldDiv = true;
}
else if (funcApp.m_args[0] == vnForElemSize)
{
*pInxVN = funcApp.m_args[1];
canFoldDiv = true;
}
}
// Perform ((inxVN / elemSizeVN) + vnForConstInd)
if (!canFoldDiv)
{
ValueNum vnForElemSize = vnStore->VNForPtrSizeIntCon(elemSize);
ValueNum vnForScaledInx = vnStore->VNForFunc(TYP_I_IMPL, VNFunc(GT_DIV), inxVN, vnForElemSize);
*pInxVN = vnForScaledInx;
}
if (constInd != 0)
{
ValueNum vnForConstInd = comp->GetValueNumStore()->VNForPtrSizeIntCon(constInd);
VNFunc vnFunc = VNFunc(GT_ADD);
*pInxVN = comp->GetValueNumStore()->VNForFunc(TYP_I_IMPL, vnFunc, *pInxVN, vnForConstInd);
}
}
}
}
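//------------------------------------------------------------------------
// ParseArrayAddressWork: Recursive helper for ParseArrayAddress. Walks the
//    address expression, accumulating the array reference, the non-constant
//    index contribution (as a value number), the constant offset, and the
//    field sequence.
//
// Arguments:
//    comp - the Compiler instance
//    inputMul - multiplier to apply to contributions found in this subtree
//    pArr - [out] the array object reference, if found
//    pInxVN - [in, out] accumulated non-constant index value number
//    pOffset - [in, out] accumulated constant offset
//    pFldSeq - [in, out] accumulated field sequence
//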
void GenTree::ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq)
{
if (TypeGet() == TYP_REF)
{
// This must be the array pointer.
*pArr = this;
assert(inputMul == 1); // Can't multiply the array pointer by anything.
}
else
{
switch (OperGet())
{
case GT_CNS_INT:
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, AsIntCon()->gtFieldSeq);
assert(!AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
*pOffset += (inputMul * (target_ssize_t)(AsIntCon()->gtIconVal));
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
if (OperGet() == GT_SUB)
{
inputMul = -inputMul;
}
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
case GT_MUL:
{
// If one op is a constant, continue parsing down.
target_ssize_t subMul = 0;
GenTree* nonConst = nullptr;
if (AsOp()->gtOp1->IsCnsIntOrI())
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
else
{
assert(!AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp1->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp2;
}
}
else if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
// target_ssize_t type.
subMul = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
nonConst = AsOp()->gtOp1;
}
if (nonConst != nullptr)
{
nonConst->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
}
break;
case GT_LSH:
// If one op is a constant, continue parsing down.
if (AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
// type.
target_ssize_t shiftVal = (target_ssize_t)AsOp()->gtOp2->AsIntConCommon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
AsOp()->gtOp1->ParseArrayAddressWork(comp, inputMul * subMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
// Otherwise, exit the switch, treat as a contribution to the index.
break;
case GT_COMMA:
// We don't care about exceptions for this purpose.
if (AsOp()->gtOp1->OperIs(GT_BOUNDS_CHECK) || AsOp()->gtOp1->IsNothingNode())
{
AsOp()->gtOp2->ParseArrayAddressWork(comp, inputMul, pArr, pInxVN, pOffset, pFldSeq);
return;
}
break;
default:
break;
}
// If we didn't return above, must be a contribution to the non-constant part of the index VN.
ValueNum vn = comp->GetValueNumStore()->VNLiberalNormalValue(gtVNPair);
if (inputMul != 1)
{
ValueNum mulVN = comp->GetValueNumStore()->VNForLongCon(inputMul);
vn = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_MUL), mulVN, vn);
}
if (*pInxVN == ValueNumStore::NoVN)
{
*pInxVN = vn;
}
else
{
*pInxVN = comp->GetValueNumStore()->VNForFunc(TypeGet(), VNFunc(GT_ADD), *pInxVN, vn);
}
}
}
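//------------------------------------------------------------------------
// ParseArrayElemForm: Is "this" an indirection of an array element address?
//
// Arguments:
//    comp - the Compiler instance
//    arrayInfo - [out] info about the array element being accessed
//    pFldSeq - [in, out] accumulated field sequence
//
// Return Value:
//    True if the tree is recognized as an array element access; false otherwise.
//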
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
bool b = comp->GetArrayInfoMap()->Lookup(this, arrayInfo);
assert(b);
return true;
}
// Otherwise...
GenTree* addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
{
return false;
}
}
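//------------------------------------------------------------------------
// ParseArrayElemAddrForm: Is "this" the address of an array element? Peels
//    off constant offsets, accumulating their field sequences, until the
//    underlying array element indirection is found.
//
// Arguments:
//    comp - the Compiler instance
//    arrayInfo - [out] info about the array element being addressed
//    pFldSeq - [in, out] accumulated field sequence
//
// Return Value:
//    True if the tree is recognized as an array element address; false otherwise.
//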
bool GenTree::ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_ADD:
{
GenTree* arrAddr = nullptr;
GenTree* offset = nullptr;
if (AsOp()->gtOp1->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp1;
offset = AsOp()->gtOp2;
}
else if (AsOp()->gtOp2->TypeGet() == TYP_BYREF)
{
arrAddr = AsOp()->gtOp2;
offset = AsOp()->gtOp1;
}
else
{
return false;
}
if (!offset->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return arrAddr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
case GT_ADDR:
{
GenTree* addrArg = AsOp()->gtOp1;
if (addrArg->OperGet() != GT_IND)
{
return false;
}
else
{
// The "Addr" node might be annotated with a zero-offset field sequence.
FieldSeqNode* zeroOffsetFldSeq = nullptr;
if (comp->GetZeroOffsetFieldMap()->Lookup(this, &zeroOffsetFldSeq))
{
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, zeroOffsetFldSeq);
}
return addrArg->ParseArrayElemForm(comp, arrayInfo, pFldSeq);
}
}
default:
return false;
}
}
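//------------------------------------------------------------------------
// ParseOffsetForm: Is "this" a constant offset (a constant or a sum of
//    constants)? Accumulates any field sequences found along the way.
//
// Arguments:
//    comp - the Compiler instance
//    pFldSeq - [in, out] accumulated field sequence
//
// Return Value:
//    True if the tree is a recognized constant offset form; false otherwise.
//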
bool GenTree::ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq)
{
switch (OperGet())
{
case GT_CNS_INT:
{
GenTreeIntCon* icon = AsIntCon();
*pFldSeq = comp->GetFieldSeqStore()->Append(*pFldSeq, icon->gtFieldSeq);
return true;
}
case GT_ADD:
if (!AsOp()->gtOp1->ParseOffsetForm(comp, pFldSeq))
{
return false;
}
return AsOp()->gtOp2->ParseOffsetForm(comp, pFldSeq);
default:
return false;
}
}
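//------------------------------------------------------------------------
// LabelIndex: Label the constituents of this array index expression:
//    constants contributing to the constant part of the index get the
//    ConstantIndex pseudo-field, local variables get GTF_VAR_ARR_INDEX,
//    and array lengths get GTF_ARRLEN_ARR_IDX.
//
// Arguments:
//    comp - the Compiler instance
//    isConst - whether this subtree contributes to the constant part of the index
//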
void GenTree::LabelIndex(Compiler* comp, bool isConst)
{
switch (OperGet())
{
case GT_CNS_INT:
// If we got here, this is a contribution to the constant part of the index.
if (isConst)
{
AsIntCon()->gtFieldSeq =
comp->GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
return;
case GT_LCL_VAR:
gtFlags |= GTF_VAR_ARR_INDEX;
return;
case GT_ADD:
case GT_SUB:
AsOp()->gtOp1->LabelIndex(comp, isConst);
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
case GT_CAST:
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
case GT_ARR_LENGTH:
gtFlags |= GTF_ARRLEN_ARR_IDX;
return;
default:
// For all other operators, peel off one constant; and then label the other if it's also a constant.
if (OperIsArithmetic() || OperIsCompare())
{
if (AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp1->LabelIndex(comp, isConst);
break;
}
else if (AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
AsOp()->gtOp2->LabelIndex(comp, isConst);
break;
}
// Otherwise continue downward on both, labeling vars.
AsOp()->gtOp1->LabelIndex(comp, false);
AsOp()->gtOp2->LabelIndex(comp, false);
}
break;
}
}
// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
//
// static
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, FieldSeqNode::FieldKind::Instance);
// FieldSeqStore methods.
FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
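//------------------------------------------------------------------------
// CreateSingleton: Get the canonical single-element field sequence for the
//    given field handle, creating it if necessary.
//
// Arguments:
//    fieldHnd - the field handle
//    fieldKind - the kind of field the handle refers to
//
// Return Value:
//    The canonical singleton field sequence.
//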
FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode::FieldKind fieldKind)
{
FieldSeqNode fsn(fieldHnd, nullptr, fieldKind);
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
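//------------------------------------------------------------------------
// Append: Concatenate two field sequences, canonicalizing the result.
//
// Arguments:
//    a - the first sequence (may be null or NotAField)
//    b - the second sequence (may be null or NotAField)
//
// Return Value:
//    The canonical sequence representing "a" followed by "b". NotAField is
//    returned if either input is NotAField.
//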
FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
{
if (a == nullptr)
{
return b;
}
else if (a == NotAField())
{
return NotAField();
}
else if (b == nullptr)
{
return a;
}
else if (b == NotAField())
{
return NotAField();
}
// Extremely special case for ConstantIndex pseudo-fields: appending two consecutive
// constant-index pseudo-fields collapses them into one.
else if (a->GetNext() == nullptr && a->GetFieldHandleValue() == ConstantIndexPseudoField &&
b->GetFieldHandleValue() == ConstantIndexPseudoField)
{
return b;
}
else
{
// We should never add a duplicate FieldSeqNode
assert(a != b);
FieldSeqNode* tmp = Append(a->GetNext(), b);
FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetKind());
FieldSeqNode* res = nullptr;
if (m_canonMap->Lookup(fsn, &res))
{
return res;
}
else
{
res = m_alloc.allocate<FieldSeqNode>(1);
*res = fsn;
m_canonMap->Set(fsn, res);
return res;
}
}
}
// Static vars.
int FieldSeqStore::FirstElemPseudoFieldStruct;
int FieldSeqStore::ConstantIndexPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::FirstElemPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::FirstElemPseudoFieldStruct;
CORINFO_FIELD_HANDLE FieldSeqStore::ConstantIndexPseudoField =
(CORINFO_FIELD_HANDLE)&FieldSeqStore::ConstantIndexPseudoFieldStruct;
FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind) : m_next(next)
{
uintptr_t handleValue = reinterpret_cast<uintptr_t>(fieldHnd);
assert((handleValue & FIELD_KIND_MASK) == 0);
m_fieldHandleAndKind = handleValue | static_cast<uintptr_t>(fieldKind);
if (!FieldSeqStore::IsPseudoField(fieldHnd) && (fieldHnd != NO_FIELD_HANDLE))
{
assert(JitTls::GetCompiler()->eeIsFieldStatic(fieldHnd) == IsStaticField());
}
else
{
// Use the default for pseudo-fields.
assert(fieldKind == FieldKind::Instance);
}
}
bool FieldSeqNode::IsFirstElemFieldSeq() const
{
return GetFieldHandleValue() == FieldSeqStore::FirstElemPseudoField;
}
bool FieldSeqNode::IsConstantIndexFieldSeq() const
{
return GetFieldHandleValue() == FieldSeqStore::ConstantIndexPseudoField;
}
bool FieldSeqNode::IsPseudoField() const
{
return (GetFieldHandleValue() == FieldSeqStore::FirstElemPseudoField) ||
(GetFieldHandleValue() == FieldSeqStore::ConstantIndexPseudoField);
}
#ifdef FEATURE_SIMD
GenTreeSIMD* Compiler::gtNewSIMDNode(
var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
GenTreeSIMD* Compiler::gtNewSIMDNode(var_types type,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
{
assert(op1 != nullptr);
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
GenTreeSIMD* simdNode = new (this, GT_SIMD)
GenTreeSIMD(type, getAllocator(CMK_ASTNode), op1, op2, simdIntrinsicID, simdBaseJitType, simdSize);
return simdNode;
}
//-------------------------------------------------------------------
// SetOpLclRelatedToSIMDIntrinsic: Determine if the tree has a local var that needs to be set
// as used by a SIMD intrinsic, and if so, set that local var appropriately.
//
// Arguments:
// op - The tree, to be an operand of a new GT_SIMD node, to check.
//
void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op)
{
if (op == nullptr)
{
return;
}
if (op->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(op);
}
else if (op->OperIs(GT_OBJ))
{
GenTree* addr = op->AsIndir()->Addr();
if (addr->OperIs(GT_ADDR))
{
GenTree* addrOp1 = addr->AsOp()->gtGetOp1();
if (addrOp1->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(addrOp1);
}
}
}
}
bool GenTree::isCommutativeSIMDIntrinsic()
{
assert(gtOper == GT_SIMD);
switch (AsSIMD()->GetSIMDIntrinsicId())
{
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
return true;
default:
return false;
}
}
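//-------------------------------------------------------------------
// ResetOperandArray: Resize the operand array of a multi-op node, switching between the
// inline and dynamically allocated representations as needed.
//
// Arguments:
//    newOperandCount    - The new number of operands
//    compiler           - The compiler instance (only needed when a larger dynamic array must be allocated)
//    inlineOperands     - The node's inline operand storage
//    inlineOperandCount - The number of available inline operand slots
//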
void GenTreeMultiOp::ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount)
{
size_t oldOperandCount = GetOperandCount();
GenTree** oldOperands = GetOperandArray();
if (newOperandCount > oldOperandCount)
{
if (newOperandCount <= inlineOperandCount)
{
assert(oldOperandCount <= inlineOperandCount);
assert(oldOperands == inlineOperands);
}
else
{
// The most difficult case: we need to recreate the dynamic array.
assert(compiler != nullptr);
m_operands = compiler->getAllocator(CMK_ASTNode).allocate<GenTree*>(newOperandCount);
}
}
else
{
// We are shrinking the array and may in the process switch to an inline representation.
// We choose to do so for simplicity ("if a node has <= InlineOperandCount operands,
// then it stores them inline"), but actually it may be more profitable to not do that,
// as it would save us a copy and a potential cache miss (though the latter seems unlikely).
if ((newOperandCount <= inlineOperandCount) && (oldOperands != inlineOperands))
{
m_operands = inlineOperands;
}
}
#ifdef DEBUG
for (size_t i = 0; i < newOperandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
SetOperandCount(newOperandCount);
}
/* static */ bool GenTreeMultiOp::OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2)
{
if (op1->GetOperandCount() != op2->GetOperandCount())
{
return false;
}
for (size_t i = 1; i <= op1->GetOperandCount(); i++)
{
if (!Compare(op1->Op(i), op2->Op(i)))
{
return false;
}
}
return true;
}
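//-------------------------------------------------------------------
// InitializeOperands: Copy the given operands into this node's operand array and propagate
// their side effect flags to the node.
//
// Arguments:
//    operands     - The source array of operands
//    operandCount - The number of operands to copy
//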
void GenTreeMultiOp::InitializeOperands(GenTree** operands, size_t operandCount)
{
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = operands[i];
gtFlags |= (operands[i]->gtFlags & GTF_ALL_EFFECT);
}
SetOperandCount(operandCount);
}
var_types GenTreeJitIntrinsic::GetAuxiliaryType() const
{
CorInfoType auxiliaryJitType = GetAuxiliaryJitType();
if (auxiliaryJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(auxiliaryJitType);
}
var_types GenTreeJitIntrinsic::GetSimdBaseType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
return TYP_UNKNOWN;
}
return JitType2PreciseVarType(simdBaseJitType);
}
// Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeSIMD::OperIsMemoryLoad() const
{
if (GetSIMDIntrinsicId() == SIMDIntrinsicInitArray)
{
return true;
}
return false;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeSIMD::Equals(GenTreeSIMD* op1, GenTreeSIMD* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetSIMDIntrinsicId() == op2->GetSIMDIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool GenTree::isCommutativeHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
return HWIntrinsicInfo::IsCommutative(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif // TARGET_XARCH
}
bool GenTree::isContainableHWIntrinsic() const
{
assert(gtOper == GT_HWINTRINSIC);
#ifdef TARGET_XARCH
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_SSE_LoadAlignedVector128:
case NI_SSE_LoadScalarVector128:
case NI_SSE_LoadVector128:
case NI_SSE2_LoadAlignedVector128:
case NI_SSE2_LoadScalarVector128:
case NI_SSE2_LoadVector128:
case NI_AVX_LoadAlignedVector256:
case NI_AVX_LoadVector256:
case NI_AVX_ExtractVector128:
case NI_AVX2_ExtractVector128:
{
return true;
}
default:
{
return false;
}
}
#elif TARGET_ARM64
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
case NI_Vector64_get_Zero:
case NI_Vector128_get_Zero:
{
return true;
}
default:
{
return false;
}
}
#else
return false;
#endif // TARGET_XARCH
}
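//-------------------------------------------------------------------
// isRMWHWIntrinsic: Check whether this hardware intrinsic has read-modify-write semantics,
// i.e. whether its destination register must also serve as one of its source registers.
//
// Arguments:
//    comp - The compiler instance
//
// Return Value:
//    true if the intrinsic has RMW semantics on the current target; false otherwise.
//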
bool GenTree::isRMWHWIntrinsic(Compiler* comp)
{
assert(gtOper == GT_HWINTRINSIC);
assert(comp != nullptr);
#if defined(TARGET_XARCH)
if (!comp->canUseVexEncoding())
{
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
}
switch (AsHWIntrinsic()->GetHWIntrinsicId())
{
// TODO-XArch-Cleanup: Move this switch block to be table driven.
case NI_SSE42_Crc32:
case NI_SSE42_X64_Crc32:
case NI_FMA_MultiplyAdd:
case NI_FMA_MultiplyAddNegated:
case NI_FMA_MultiplyAddNegatedScalar:
case NI_FMA_MultiplyAddScalar:
case NI_FMA_MultiplyAddSubtract:
case NI_FMA_MultiplySubtract:
case NI_FMA_MultiplySubtractAdd:
case NI_FMA_MultiplySubtractNegated:
case NI_FMA_MultiplySubtractNegatedScalar:
case NI_FMA_MultiplySubtractScalar:
{
return true;
}
default:
{
return false;
}
}
#elif defined(TARGET_ARM64)
return HWIntrinsicInfo::HasRMWSemantics(AsHWIntrinsic()->GetHWIntrinsicId());
#else
return false;
#endif
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic, op1, op2, op3);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
GenTree* op4,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
SetOpLclRelatedToSIMDIntrinsic(op4);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic, op1, op2, op3, op4);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
GenTree** operands,
size_t operandCount,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
IntrinsicNodeBuilder nodeBuilder(getAllocator(CMK_ASTNode), operandCount);
for (size_t i = 0; i < operandCount; i++)
{
nodeBuilder.AddOperand(i, operands[i]);
SetOpLclRelatedToSIMDIntrinsic(operands[i]);
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTreeHWIntrinsic* Compiler::gtNewSimdHWIntrinsicNode(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
for (size_t i = 0; i < nodeBuilder.GetOperandCount(); i++)
{
SetOpLclRelatedToSIMDIntrinsic(nodeBuilder.GetOperand(i));
}
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, std::move(nodeBuilder), hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
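//-------------------------------------------------------------------
// gtNewSimdAbsNode: Create a SIMD node computing the element-wise absolute value of op1.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node, or op1 unchanged if the base type is unsigned.
//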
GenTree* Compiler::gtNewSimdAbsNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeGet() == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
if (varTypeIsUnsigned(simdBaseType))
{
return op1;
}
#if defined(TARGET_XARCH)
if (varTypeIsFloating(simdBaseType))
{
// Abs(v) = v & ~new vector<T>(-0.0);
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
GenTree* bitMask = gtNewDconNode(-0.0, simdBaseType);
bitMask = gtNewSimdCreateBroadcastNode(type, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND_NOT, type, op1, bitMask, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
if ((simdBaseType != TYP_LONG) && ((simdSize == 32) || compOpportunisticallyDependsOn(InstructionSet_SSSE3)))
{
NamedIntrinsic intrinsic = (simdSize == 32) ? NI_AVX2_Abs : NI_SSSE3_Abs;
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
GenTree* tmp;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector abs"));
// op1 = op1 < Zero
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, tmp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// tmp = Zero - op1Dup1
tmp = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdBinOpNode(GT_SUB, type, tmp, op1Dup1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, tmp, op1Dup2)
return gtNewSimdCndSelNode(type, op1, tmp, op1Dup2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
NamedIntrinsic intrinsic = NI_AdvSimd_Abs;
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
else if (varTypeIsLong(simdBaseType))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_AbsScalar : NI_AdvSimd_Arm64_Abs;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif
}
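//-------------------------------------------------------------------
// gtNewSimdBinOpNode: Create a SIMD node computing the element-wise binary operation "op"
// of op1 and op2, expanding to the appropriate hardware intrinsic (or intrinsic sequence)
// for the current target.
//
// Arguments:
//    op                  - The binary operation (GT_ADD, GT_AND, GT_MUL, shifts, etc.)
//    type                - The SIMD type of the node
//    op1                 - The first operand
//    op2                 - The second operand (the shift amount for shifts, possibly a scalar for GT_MUL)
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//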
GenTree* Compiler::gtNewSimdBinOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op1 != nullptr);
assert(op1->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
assert(op2 != nullptr);
if ((op == GT_LSH) || (op == GT_RSH) || (op == GT_RSZ))
{
assert(op2->TypeIs(TYP_INT));
}
else
{
assert(op2->TypeIs(type, simdBaseType, genActualType(simdBaseType)));
}
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_ADD:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Add;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Add;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Add;
}
else
{
intrinsic = NI_SSE2_Add;
}
break;
}
case GT_AND:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_And;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_And;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_And;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_And;
}
else
{
intrinsic = NI_SSE2_And;
}
break;
}
case GT_AND_NOT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_AndNot;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_AndNot;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_AndNot;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_AndNot;
}
else
{
intrinsic = NI_SSE2_AndNot;
}
// GT_AND_NOT expects `op1 & ~op2`, but xarch does `~op1 & op2`
std::swap(op1, op2);
break;
}
case GT_DIV:
{
// TODO-XARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Divide;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Divide;
}
else
{
intrinsic = NI_SSE2_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsByte(simdBaseType));
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_SSE2_ConvertScalarToVector128Int32, CORINFO_TYPE_INT,
16, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (op == GT_LSH)
{
intrinsic = NI_AVX2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AVX2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AVX2_ShiftRightLogical;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_SSE2_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_SSE2_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_SSE2_ShiftRightLogical;
}
break;
}
case GT_MUL:
{
GenTree** broadcastOp = nullptr;
if (varTypeIsArithmetic(op1))
{
broadcastOp = &op1;
}
else if (varTypeIsArithmetic(op2))
{
broadcastOp = &op2;
}
if (broadcastOp != nullptr)
{
*broadcastOp =
gtNewSimdCreateBroadcastNode(type, *broadcastOp, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
switch (simdBaseType)
{
case TYP_SHORT:
case TYP_USHORT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else
{
intrinsic = NI_SSE2_MultiplyLow;
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_MultiplyLow;
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_MultiplyLow;
}
else
{
// op1Dup = op1
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector multiply"));
// op2Dup = op2
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector multiply"));
// op1 = Sse2.ShiftRightLogical128BitLane(op1, 4)
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.ShiftRightLogical128BitLane(op2, 4)
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(4, TYP_INT),
NI_SSE2_ShiftRightLogical128BitLane, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Multiply(op2.AsUInt32(), op1.AsUInt32()).AsInt32()
op2 = gtNewSimdHWIntrinsicNode(type, op2, op1, NI_SSE2_Multiply, CORINFO_TYPE_ULONG, simdSize,
isSimdAsHWIntrinsic);
// op2 = Sse2.Shuffle(op2, (0, 0, 2, 0))
op2 = gtNewSimdHWIntrinsicNode(type, op2, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Multiply(op1Dup.AsUInt32(), op2Dup.AsUInt32()).AsInt32()
op1 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_Multiply, CORINFO_TYPE_ULONG,
simdSize, isSimdAsHWIntrinsic);
// op1 = Sse2.Shuffle(op1, (0, 0, 2, 0))
op1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(SHUFFLE_XXZX, TYP_INT), NI_SSE2_Shuffle,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = Sse2.UnpackLow(op1, op2)
intrinsic = NI_SSE2_UnpackLow;
}
break;
}
case TYP_FLOAT:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Multiply;
}
else
{
intrinsic = NI_SSE2_Multiply;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Or;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Or;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Or;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Or;
}
else
{
intrinsic = NI_SSE2_Or;
}
break;
}
case GT_SUB:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Subtract;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_Subtract;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Subtract;
}
else
{
intrinsic = NI_SSE2_Subtract;
}
break;
}
case GT_XOR:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Xor;
}
else if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
{
intrinsic = NI_AVX2_Xor;
}
else
{
// Since this is a bitwise operation, we can still support it by lying
// about the type and doing the operation using a supported instruction
intrinsic = NI_AVX_Xor;
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Xor;
}
else
{
intrinsic = NI_SSE2_Xor;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_ADD:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_AddScalar : NI_AdvSimd_Arm64_Add;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_AddScalar;
}
else
{
intrinsic = NI_AdvSimd_Add;
}
break;
}
case GT_AND:
{
intrinsic = NI_AdvSimd_And;
break;
}
case GT_AND_NOT:
{
intrinsic = NI_AdvSimd_BitwiseClear;
break;
}
case GT_DIV:
{
// TODO-AARCH-CQ: We could support division by constant for integral types
assert(varTypeIsFloating(simdBaseType));
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_DivideScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Divide;
}
break;
}
case GT_LSH:
case GT_RSH:
case GT_RSZ:
{
assert(!varTypeIsFloating(simdBaseType));
assert((op != GT_RSH) || !varTypeIsUnsigned(simdBaseType));
// "over shifting" is platform specific behavior. We will match the C# behavior
// this requires we mask with (sizeof(T) * 8) - 1 which ensures the shift cannot
// exceed the number of bits available in `T`. This is roughly equivalent to
// x % (sizeof(T) * 8), but that is "more expensive" and only the same for unsigned
// inputs, where-as we have a signed-input and so negative values would differ.
unsigned shiftCountMask = (genTypeSize(simdBaseType) * 8) - 1;
if (op2->IsCnsIntOrI())
{
op2->AsIntCon()->gtIconVal &= shiftCountMask;
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmeticScalar;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLeftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftRightArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftRightLogical;
}
}
else
{
op2 = gtNewOperNode(GT_AND, TYP_INT, op2, gtNewIconNode(shiftCountMask));
if (op != GT_LSH)
{
op2 = gtNewOperNode(GT_NEG, TYP_INT, op2);
}
op2 = gtNewSimdCreateBroadcastNode(type, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmeticScalar;
}
else
{
intrinsic = NI_AdvSimd_ShiftLogicalScalar;
}
}
else if (op == GT_LSH)
{
intrinsic = NI_AdvSimd_ShiftLogical;
}
else if (op == GT_RSH)
{
intrinsic = NI_AdvSimd_ShiftArithmetic;
}
else
{
assert(op == GT_RSZ);
intrinsic = NI_AdvSimd_ShiftLogical;
}
}
break;
}
case GT_MUL:
{
assert(!varTypeIsLong(simdBaseType));
GenTree** scalarOp = nullptr;
if (varTypeIsArithmetic(op1))
{
// MultiplyByScalar requires the scalar op to be op2
std::swap(op1, op2);
scalarOp = &op2;
}
else if (varTypeIsArithmetic(op2))
{
scalarOp = &op2;
}
switch (JitType2PreciseVarType(simdBaseJitType))
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (scalarOp != nullptr)
{
*scalarOp = gtNewSimdCreateBroadcastNode(type, *scalarOp, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
intrinsic = NI_AdvSimd_Multiply;
break;
}
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_CreateScalarUnsafe,
simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Multiply;
}
break;
}
case TYP_DOUBLE:
{
if (scalarOp != nullptr)
{
intrinsic = NI_AdvSimd_Arm64_MultiplyByScalar;
*scalarOp = gtNewSimdHWIntrinsicNode(TYP_SIMD8, *scalarOp, NI_Vector64_Create, simdBaseJitType,
8, isSimdAsHWIntrinsic);
}
else
{
intrinsic = NI_AdvSimd_Arm64_Multiply;
}
if (simdSize == 8)
{
intrinsic = NI_AdvSimd_MultiplyScalar;
}
break;
}
default:
{
unreached();
}
}
break;
}
case GT_OR:
{
intrinsic = NI_AdvSimd_Or;
break;
}
case GT_SUB:
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_SubtractScalar : NI_AdvSimd_Arm64_Subtract;
}
else if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
intrinsic = NI_AdvSimd_SubtractScalar;
}
else
{
intrinsic = NI_AdvSimd_Subtract;
}
break;
}
case GT_XOR:
{
intrinsic = NI_AdvSimd_Xor;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
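//-------------------------------------------------------------------
// gtNewSimdCeilNode: Create a SIMD node computing the element-wise ceiling of op1.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The vector operand (floating-point base type only)
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//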
GenTree* Compiler::gtNewSimdCeilNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Ceiling;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Ceiling;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_CeilingScalar : NI_AdvSimd_Arm64_Ceiling;
}
else
{
intrinsic = NI_AdvSimd_Ceiling;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
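//-------------------------------------------------------------------
// gtNewSimdCmpOpNode: Create a SIMD node performing the element-wise comparison "op" of
// op1 and op2, producing an all-bits-set or zero mask per element. Comparisons that lack
// direct hardware support (e.g. unsigned or 64-bit integer comparisons on older ISAs) are
// expanded here into equivalent instruction sequences.
//
// Arguments:
//    op                  - The comparison operation (GT_EQ, GT_GE, GT_GT, GT_LE, GT_LT)
//    type                - The SIMD type of the node
//    op1                 - The first operand
//    op2                 - The second operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//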
GenTree* Compiler::gtNewSimdCmpOpNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareEqual;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareEqual;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
intrinsic = NI_SSE41_CompareEqual;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// tmp = (op1 == op2) i.e. compare for equality as if op1 and op2 are vector of int
// op1 = tmp
// op2 = Shuffle(tmp, (2, 3, 0, 1))
// result = BitwiseAnd(op1, op2)
//
// Shuffle is meant to swap the comparison results of low-32-bits and high 32-bits of
// respective long elements.
GenTree* tmp =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
tmp = impCloneExpr(tmp, &op1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp for vector Equals"));
op2 = gtNewSimdHWIntrinsicNode(type, tmp, gtNewIconNode(SHUFFLE_ZWXY), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_AND, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareEqual;
}
break;
}
case GT_GE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareGreaterThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = GreaterThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_GT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports > for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector GreaterThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareGreaterThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareGreaterThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareGreaterThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareGreaterThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers
// that comprise the longs op1 and op2.
//
// GreaterThan(op1, op2) can be expressed in terms of > relationship between 32-bit integers that
// comprise op1 and op2 as
// = (x1, y1) > (x2, y2)
// = (x1 > x2) || [(x1 == x2) && (y1 > y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 > op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 > op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 > x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 > y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 > y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector GreaterThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector GreaterThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector GreaterThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThanOrEqual;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThanOrEqual;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = NI_SSE2_CompareLessThanOrEqual;
}
if (intrinsic == NI_Illegal)
{
// There is no direct support for doing a combined comparison and equality for integral types.
// These have to be implemented by performing both halves and combining their results.
//
// op1Dup = op1
// op2Dup = op2
//
// op1 = LessThan(op1, op2)
// op2 = Equals(op1Dup, op2Dup)
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThanOrEqual"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThanOrEqual"));
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
break;
}
case GT_LT:
{
if (varTypeIsUnsigned(simdBaseType))
{
// Vector of byte, ushort, uint and ulong:
// Hardware supports < for signed comparison. Therefore, to use it for
// comparing unsigned numbers, we subtract a constant from both the
// operands such that the result fits within the corresponding signed
// type. The resulting signed numbers are compared using signed comparison.
//
// Vector of byte: constant to be subtracted is 2^7
// Vector of ushort: constant to be subtracted is 2^15
// Vector of uint: constant to be subtracted is 2^31
// Vector of ulong: constant to be subtracted is 2^63
//
// We need to treat op1 and op2 as signed for comparison purpose after
// the transformation.
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
CorInfoType constValJitType = CORINFO_TYPE_INT;
switch (simdBaseType)
{
case TYP_UBYTE:
{
constVal = gtNewIconNode(0x80808080);
simdBaseJitType = CORINFO_TYPE_BYTE;
simdBaseType = TYP_BYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
case TYP_UINT:
{
constVal = gtNewIconNode(0x80000000);
simdBaseJitType = CORINFO_TYPE_INT;
simdBaseType = TYP_INT;
break;
}
case TYP_ULONG:
{
constVal = gtNewLconNode(0x8000000000000000);
constValJitType = CORINFO_TYPE_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
simdBaseType = TYP_LONG;
break;
}
default:
{
unreached();
}
}
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, constValJitType, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup;
constVector = impCloneExpr(constVector, &constVectorDup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector LessThan"));
// op1 = op1 - constVector
op1 = gtNewSimdBinOpNode(GT_SUB, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVector
op2 = gtNewSimdBinOpNode(GT_SUB, type, op2, constVectorDup, opJitType, simdSize, isSimdAsHWIntrinsic);
}
// This should have been mutated by the above path
assert(!varTypeIsUnsigned(simdBaseType));
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_CompareLessThan;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_CompareLessThan;
}
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_CompareLessThan;
}
else if (varTypeIsLong(simdBaseType))
{
if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
{
intrinsic = NI_SSE42_CompareLessThan;
}
else
{
// There is no direct SSE2 support for comparing TYP_LONG vectors.
// These have to be implemented in terms of TYP_INT vector comparison operations.
//
// Let us consider the case of single long element comparison.
// Say op1 = (x1, y1) and op2 = (x2, y2) where x1, y1, x2, and y2 are 32-bit integers
// that comprise the longs op1 and op2.
//
// LessThan(op1, op2) can be expressed in terms of the < relationship between the 32-bit
// integers that comprise op1 and op2 as
// = (x1, y1) < (x2, y2)
// = (x1 < x2) || [(x1 == x2) && (y1 < y2)] - eq (1)
//
// op1Dup1 = op1
// op1Dup2 = op1Dup1
// op2Dup1 = op2
// op2Dup2 = op2Dup1
//
// t = (op1 < op2) - 32-bit signed comparison
// u = (op1Dup1 == op2Dup1) - 32-bit equality comparison
// v = (op1Dup2 < op2Dup2) - 32-bit unsigned comparison
//
// op1 = Shuffle(t, (3, 3, 1, 1)) - This corresponds to (x1 < x2) in eq(1) above
// v = Shuffle(v, (2, 2, 0, 0)) - This corresponds to (y1 < y2) in eq(1) above
// u = Shuffle(u, (3, 3, 1, 1)) - This corresponds to (x1 == x2) in eq(1) above
// op2 = BitwiseAnd(v, u) - This corresponds to [(x1 == x2) && (y1 < y2)] in eq(1) above
//
// result = BitwiseOr(op1, op2)
GenTree* op1Dup1;
op1 = impCloneExpr(op1, &op1Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op1Dup2;
op1Dup1 = impCloneExpr(op1Dup1, &op1Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector LessThan"));
GenTree* op2Dup1;
op2 = impCloneExpr(op2, &op2Dup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector LessThan"));
GenTree* op2Dup2;
op2Dup1 = impCloneExpr(op2Dup1, &op2Dup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 vector LessThan"));
GenTree* t =
gtNewSimdCmpOpNode(op, type, op1, op2, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* u = gtNewSimdCmpOpNode(GT_EQ, type, op1Dup1, op2Dup1, CORINFO_TYPE_INT, simdSize,
isSimdAsHWIntrinsic);
GenTree* v = gtNewSimdCmpOpNode(op, type, op1Dup2, op2Dup2, CORINFO_TYPE_UINT, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(type, t, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
v = gtNewSimdHWIntrinsicNode(type, v, gtNewIconNode(SHUFFLE_ZZXX, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
u = gtNewSimdHWIntrinsicNode(type, u, gtNewIconNode(SHUFFLE_WWYY, TYP_INT), NI_SSE2_Shuffle,
CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
op2 = gtNewSimdBinOpNode(GT_AND, type, v, u, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_OR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
else
{
intrinsic = NI_SSE2_CompareLessThan;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareEqual;
}
break;
}
case GT_GE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareGreaterThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThanOrEqual;
}
break;
}
case GT_GT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic =
(simdSize == 8) ? NI_AdvSimd_Arm64_CompareGreaterThanScalar : NI_AdvSimd_Arm64_CompareGreaterThan;
}
else
{
intrinsic = NI_AdvSimd_CompareGreaterThan;
}
break;
}
case GT_LE:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanOrEqualScalar
: NI_AdvSimd_Arm64_CompareLessThanOrEqual;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThanOrEqual;
}
break;
}
case GT_LT:
{
if ((varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE)))
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareLessThanScalar : NI_AdvSimd_Arm64_CompareLessThan;
}
else
{
intrinsic = NI_AdvSimd_CompareLessThan;
}
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
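//-------------------------------------------------------------------
// gtNewSimdCmpOpAllNode: Create a node computing whether the comparison "op" holds for all
// elements of op1 and op2, producing a TYP_BOOL result.
//
// Arguments:
//    op                  - The comparison operation (GT_EQ, GT_GE, GT_GT, GT_LE, GT_LT)
//    type                - The return type (TYP_BOOL)
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//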
GenTree* Compiler::gtNewSimdCmpOpAllNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
}
else
{
intrinsic = NI_Vector128_op_Equality;
}
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Equality;
getAllBitsSet = NI_Vector256_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Equality : NI_Vector128_op_Equality;
break;
}
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() == Vector128<TInteger>.AllBitsSet
NamedIntrinsic getAllBitsSet = NI_Illegal;
if (simdSize == 8)
{
intrinsic = NI_Vector64_op_Equality;
getAllBitsSet = NI_Vector64_get_AllBitsSet;
}
else
{
intrinsic = NI_Vector128_op_Equality;
getAllBitsSet = NI_Vector128_get_AllBitsSet;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdHWIntrinsicNode(simdType, getAllBitsSet, simdBaseJitType, simdSize);
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
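//-------------------------------------------------------------------
// gtNewSimdCmpOpAnyNode: Create a node computing whether the comparison "op" holds for any
// element of op1 and op2, producing a TYP_BOOL result.
//
// Arguments:
//    op                  - The comparison operation (GT_EQ, GT_NE, GT_GE, GT_GT, GT_LE, GT_LT)
//    type                - The return type (TYP_BOOL)
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//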
GenTree* Compiler::gtNewSimdCmpOpAnyNode(genTreeOps op,
var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(type == TYP_BOOL);
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
if (simdSize == 32)
{
// TODO-XArch-CQ: It's a non-trivial amount of work to support these
// for floating-point while only utilizing AVX. It would require, among
// other things, inverting the comparison and potentially support for a
// new Avx.TestNotZ intrinsic to ensure the codegen remains efficient.
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_op_Inequality;
}
else
{
intrinsic = NI_Vector128_op_Inequality;
}
break;
}
#elif defined(TARGET_ARM64)
case GT_EQ:
case GT_GE:
case GT_GT:
case GT_LE:
case GT_LT:
{
// We want to generate a comparison along the lines of
// GT_XX(op1, op2).As<T, TInteger>() != Vector128<TInteger>.Zero
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
op1 = gtNewSimdCmpOpNode(op, simdType, op1, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ false);
if (simdBaseType == TYP_FLOAT)
{
simdBaseType = TYP_INT;
simdBaseJitType = CORINFO_TYPE_INT;
}
else if (simdBaseType == TYP_DOUBLE)
{
simdBaseType = TYP_LONG;
simdBaseJitType = CORINFO_TYPE_LONG;
}
op2 = gtNewSimdZeroNode(simdType, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ false);
break;
}
case GT_NE:
{
intrinsic = (simdSize == 8) ? NI_Vector64_op_Inequality : NI_Vector128_op_Inequality;
break;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
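//-------------------------------------------------------------------
// gtNewSimdCndSelNode: Create a SIMD conditional-select node: each result bit is taken from
// op2 where the corresponding bit of the mask op1 is set, and from op3 otherwise.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The per-element mask (typically the result of a vector comparison)
//    op2                 - The value selected where mask bits are set
//    op3                 - The value selected where mask bits are clear
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//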
GenTree* Compiler::gtNewSimdCndSelNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
assert(op3 != nullptr);
assert(op3->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
// TODO-XARCH-CQ: It's likely beneficial to have a dedicated CndSel node so we
// can special case when the condition is the result of various compare operations.
//
// When it is, the condition is AllBitsSet or Zero on a per-element basis and we
// could change this to be a Blend operation in lowering as an optimization.
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector conditional select"));
// op2 = op2 & op1
op2 = gtNewSimdBinOpNode(GT_AND, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op3 = op3 & ~op1Dup
op3 = gtNewSimdBinOpNode(GT_AND_NOT, type, op3, op1Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op2 | op3
return gtNewSimdBinOpNode(GT_OR, type, op2, op3, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, NI_AdvSimd_BitwiseSelect, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
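//-------------------------------------------------------------------
// gtNewSimdCreateBroadcastNode: Create a SIMD node that broadcasts the scalar op1 to every
// element of the vector.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The scalar value to broadcast
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//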
GenTree* Compiler::gtNewSimdCreateBroadcastNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_Create;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
#if defined(TARGET_XARCH)
#if defined(TARGET_X86)
if (varTypeIsLong(simdBaseType) && !op1->IsIntegralConst())
{
// TODO-XARCH-CQ: It may be beneficial to emit the movq
// instruction, which takes a 64-bit memory address and
// works on 32-bit x86 systems.
unreached();
}
#endif // TARGET_X86
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_Create;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
hwIntrinsicID = NI_Vector64_Create;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
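//-------------------------------------------------------------------
// gtNewSimdDotProdNode: Create a node computing the dot product of op1 and op2, producing a
// scalar result of the base type.
//
// Arguments:
//    type                - The (scalar) return type of the node
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//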
GenTree* Compiler::gtNewSimdDotProdNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsArithmetic(type));
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
assert(op2 != nullptr);
assert(op2->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(JITtype2varType(simdBaseJitType) == type);
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
if (simdSize == 32)
{
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_Vector256_Dot;
}
else
{
assert(((simdBaseType != TYP_INT) && (simdBaseType != TYP_UINT)) ||
compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_Vector128_Dot;
}
#elif defined(TARGET_ARM64)
assert(!varTypeIsLong(simdBaseType));
intrinsic = (simdSize == 8) ? NI_Vector64_Dot : NI_Vector128_Dot;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
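//-------------------------------------------------------------------
// gtNewSimdFloorNode: Create a SIMD node computing the element-wise floor of op1.
//
// Arguments:
//    type                - The SIMD type of the node
//    op1                 - The vector operand (floating-point base type only)
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//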
GenTree* Compiler::gtNewSimdFloorNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
intrinsic = NI_AVX_Floor;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
intrinsic = NI_SSE41_Floor;
}
#elif defined(TARGET_ARM64)
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_FloorScalar : NI_AdvSimd_Arm64_Floor;
}
else
{
intrinsic = NI_AdvSimd_Floor;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
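//-------------------------------------------------------------------
// gtNewSimdGetElementNode: Create a node extracting the element of op1 at index op2, adding
// a range check on the index unless it is a constant already known to be in bounds.
//
// Arguments:
//    type                - The (scalar) return type of the node
//    op1                 - The vector operand
//    op2                 - The element index
//    simdBaseJitType     - The base JIT type of the SIMD elements
//    simdSize            - The size of the SIMD type, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic operation
//
// Return Value:
//    The created node.
//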
GenTree* Compiler::gtNewSimdGetElementNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic intrinsicId = NI_Vector128_GetElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
intrinsicId = NI_Vector256_GetElement;
}
#elif defined(TARGET_ARM64)
if (simdSize == 8)
{
intrinsicId = NI_Vector64_GetElement;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
int immUpperBound = getSIMDVectorLength(simdSize, simdBaseType) - 1;
bool rangeCheckNeeded = !op2->OperIsConst();
if (!rangeCheckNeeded)
{
ssize_t imm8 = op2->AsIntCon()->IconValue();
rangeCheckNeeded = (imm8 < 0) || (imm8 > immUpperBound);
}
if (rangeCheckNeeded)
{
op2 = addRangeCheckForHWIntrinsic(op2, 0, immUpperBound);
}
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
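//------------------------------------------------------------------------
// gtNewSimdMaxNode: Creates a new simd node that computes the element-wise maximum of op1 and
//    op2. When the target has no direct instruction for the base type, the result is computed
//    via a compare and conditional select (with an add/subtract fixup for small types that only
//    have the opposite-signedness instruction available).
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the element-wise maximum.
//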
GenTree* Compiler::gtNewSimdMaxNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Max;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Max;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Max"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Max(op1, op2)
op1 = gtNewSimdMaxNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Max;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Max;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MaxScalar : NI_AdvSimd_Arm64_Max;
}
else
{
intrinsic = NI_AdvSimd_Max;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Max"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Max"));
// op1 = op1 > op2
op1 = gtNewSimdCmpOpNode(GT_GT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
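//------------------------------------------------------------------------
// gtNewSimdMinNode: Creates a new simd node that computes the element-wise minimum of op1 and
//    op2. When the target has no direct instruction for the base type, the result is computed
//    via a compare and conditional select (with an add/subtract fixup for small types that only
//    have the opposite-signedness instruction available).
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The first vector operand
//    op2                 - The second vector operand
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the element-wise minimum.
//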
GenTree* Compiler::gtNewSimdMinNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
if (varTypeIsFloating(simdBaseType))
{
intrinsic = NI_AVX_Min;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
if (!varTypeIsLong(simdBaseType))
{
intrinsic = NI_AVX2_Min;
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_USHORT:
{
GenTree* constVal = nullptr;
CorInfoType opJitType = simdBaseJitType;
var_types opType = simdBaseType;
genTreeOps fixupOp1 = GT_NONE;
genTreeOps fixupOp2 = GT_NONE;
switch (simdBaseType)
{
case TYP_BYTE:
{
constVal = gtNewIconNode(0x80808080);
fixupOp1 = GT_SUB;
fixupOp2 = GT_ADD;
simdBaseJitType = CORINFO_TYPE_UBYTE;
simdBaseType = TYP_UBYTE;
break;
}
case TYP_USHORT:
{
constVal = gtNewIconNode(0x80008000);
fixupOp1 = GT_ADD;
fixupOp2 = GT_SUB;
simdBaseJitType = CORINFO_TYPE_SHORT;
simdBaseType = TYP_SHORT;
break;
}
default:
{
unreached();
}
}
assert(constVal != nullptr);
assert(fixupOp1 != GT_NONE);
assert(fixupOp2 != GT_NONE);
assert(opJitType != simdBaseJitType);
assert(opType != simdBaseType);
GenTree* constVector =
gtNewSimdCreateBroadcastNode(type, constVal, CORINFO_TYPE_INT, simdSize, isSimdAsHWIntrinsic);
GenTree* constVectorDup1;
constVector = impCloneExpr(constVector, &constVectorDup1, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
GenTree* constVectorDup2;
constVectorDup1 = impCloneExpr(constVectorDup1, &constVectorDup2, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone constVector for vector Min"));
// op1 = op1 - constVector
// -or-
// op1 = op1 + constVector
op1 = gtNewSimdBinOpNode(fixupOp1, type, op1, constVector, opJitType, simdSize, isSimdAsHWIntrinsic);
// op2 = op2 - constVectorDup1
// -or-
// op2 = op2 + constVectorDup1
op2 =
gtNewSimdBinOpNode(fixupOp1, type, op2, constVectorDup1, opJitType, simdSize, isSimdAsHWIntrinsic);
// op1 = Min(op1, op2)
op1 = gtNewSimdMinNode(type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = op1 + constVectorDup2
// -or-
// result = op1 - constVectorDup2
return gtNewSimdBinOpNode(fixupOp2, type, op1, constVectorDup2, opJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE_Min;
break;
}
case TYP_UBYTE:
case TYP_SHORT:
case TYP_DOUBLE:
{
intrinsic = NI_SSE2_Min;
break;
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (!varTypeIsLong(simdBaseType))
{
if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_MinScalar : NI_AdvSimd_Arm64_Min;
}
else
{
intrinsic = NI_AdvSimd_Min;
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
if (intrinsic != NI_Illegal)
{
return gtNewSimdHWIntrinsicNode(type, op1, op2, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector Min"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op2 for vector Min"));
// op1 = op1 < op2
op1 = gtNewSimdCmpOpNode(GT_LT, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// result = ConditionalSelect(op1, op1Dup, op2Dup)
return gtNewSimdCndSelNode(type, op1, op1Dup, op2Dup, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
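//------------------------------------------------------------------------
// gtNewSimdNarrowNode: Creates a new simd node that narrows the elements of op1 and op2 into a
//    single vector whose elements are half the original width; op1 supplies the lower half of
//    the result and op2 supplies the upper half.
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The vector providing the lower elements of the result
//    op2                 - The vector providing the upper elements of the result
//    simdBaseJitType     - The base jit type of the narrowed elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the narrowed vector.
//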
GenTree* Compiler::gtNewSimdNarrowNode(var_types type,
GenTree* op1,
GenTree* op2,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
assert(op2 != nullptr);
assert(op2->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
GenTree* tmp1;
GenTree* tmp2;
#if defined(TARGET_XARCH)
GenTree* tmp3;
GenTree* tmp4;
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
                // This is the same in principle as the other comments below; however, due to
                // code formatting, it is too long to reasonably display here.
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U | 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8L, 8U, 9L, 9U, AL, AU, BL, BU | CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, -- | 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, -- | CL, --, DL, --, EL, --, FL, --
// tmp4 = Elements 0L, 1L, 2L, 3L, 8L, 9L, AL, BL | 4L, 5L, 6L, 7L, CL, DL, EL, FL
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L | 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector256.Create(0x0000FFFF).AsInt16();
// var tmp2 = Avx2.And(op1.AsInt16(), tmp1);
// var tmp3 = Avx2.And(op2.AsInt16(), tmp1);
// var tmp4 = Avx2.PackUnsignedSaturate(tmp2, tmp3);
// return Avx2.Permute4x64(tmp4.AsUInt64(), SHUFFLE_WYZX).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector256_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate, CORINFO_TYPE_USHORT,
simdSize, isSimdAsHWIntrinsic);
CorInfoType permuteBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
return gtNewSimdHWIntrinsicNode(type, tmp4, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
permuteBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
// op1 = Elements 0, 1 | 2, 3; 0L, 0U, 1L, 1U | 2L, 2U, 3L, 3U
// op2 = Elements 4, 5 | 6, 7; 4L, 4U, 5L, 5U | 6L, 6U, 7L, 7U
//
// tmp1 = Elements 0L, 4L, 0U, 4U | 2L, 6L, 2U, 6U
// tmp2 = Elements 1L, 5L, 1U, 5U | 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 1L, 4L, 5L | 2L, 3L, 6L, 7L
// return Elements 0L, 1L, 2L, 3L | 4L, 5L, 6L, 7L
//
// var tmp1 = Avx2.UnpackLow(op1, op2);
// var tmp2 = Avx2.UnpackHigh(op1, op2);
// var tmp3 = Avx2.UnpackLow(tmp1, tmp2);
// return Avx2.Permute4x64(tmp3.AsUInt64(), SHUFFLE_WYZX).AsUInt32();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_AVX2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_AVX2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, gtNewIconNode(SHUFFLE_WYZX), NI_AVX2_Permute4x64,
opBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1 | 2, 3
// op2 = Elements 4, 5 | 6, 7
//
                // tmp1 = Elements 0, 1, 2, 3 | -, -, -, -
                // tmp2 = Elements 4, 5, 6, 7
// return Elements 0, 1, 2, 3 | 4, 5, 6, 7
//
// var tmp1 = Avx.ConvertToVector128Single(op1).ToVector256Unsafe();
// var tmp2 = Avx.ConvertToVector128Single(op2);
// return Avx.InsertVector128(tmp1, tmp2, 1);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op2, NI_AVX_ConvertToVector128Single, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_ToVector256Unsafe, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, gtNewIconNode(1), NI_AVX_InsertVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
else
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
// op1 = Elements 0, 1, 2, 3, 4, 5, 6, 7; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U, 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
// op2 = Elements 8, 9, A, B, C, D, E, F; 8L, 8U, 9L, 9U, AL, AU, BL, BU, CL, CU, DL, DU, EL, EU, FL, FU
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --, 4L, --, 5L, --, 6L, --, 7L, --
// tmp3 = Elements 8L, --, 9L, --, AL, --, BL, --, CL, --, DL, --, EL, --, FL, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, AL, BL, CL, DL, EL, FL
//
// var tmp1 = Vector128.Create((ushort)(0x00FF)).AsSByte();
// var tmp2 = Sse2.And(op1.AsSByte(), tmp1);
// var tmp3 = Sse2.And(op2.AsSByte(), tmp1);
                // return Sse2.PackUnsignedSaturate(tmp2, tmp3).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_BYTE) ? CORINFO_TYPE_SHORT : CORINFO_TYPE_USHORT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x00FF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE2_PackUnsignedSaturate, CORINFO_TYPE_UBYTE,
simdSize, isSimdAsHWIntrinsic);
}
case TYP_SHORT:
case TYP_USHORT:
{
// op1 = Elements 0, 1, 2, 3; 0L, 0U, 1L, 1U, 2L, 2U, 3L, 3U
// op2 = Elements 4, 5, 6, 7; 4L, 4U, 5L, 5U, 6L, 6U, 7L, 7U
//
// ...
CorInfoType opBaseJitType = (simdBaseType == TYP_SHORT) ? CORINFO_TYPE_INT : CORINFO_TYPE_UINT;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
// ...
//
// tmp2 = Elements 0L, --, 1L, --, 2L, --, 3L, --
// tmp3 = Elements 4L, --, 5L, --, 6L, --, 7L, --
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Vector128.Create(0x0000FFFF).AsInt16();
// var tmp2 = Sse2.And(op1.AsInt16(), tmp1);
// var tmp3 = Sse2.And(op2.AsInt16(), tmp1);
                    // return Sse41.PackUnsignedSaturate(tmp2, tmp3).As<T>();
tmp1 = gtNewSimdHWIntrinsicNode(type, gtNewIconNode(0x0000FFFF), NI_Vector128_Create, opBaseJitType,
simdSize, isSimdAsHWIntrinsic);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
tmp2 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp3 = gtNewSimdHWIntrinsicNode(type, op2, tmp1Dup, NI_SSE2_And, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, tmp3, NI_SSE41_PackUnsignedSaturate,
CORINFO_TYPE_USHORT, simdSize, isSimdAsHWIntrinsic);
}
else
{
// ...
//
// tmp1 = Elements 0L, 4L, 0U, 4U, 1L, 5L, 1U, 5U
// tmp2 = Elements 2L, 6L, 2U, 6U, 3L, 7L, 3U, 7U
// tmp3 = Elements 0L, 2L, 4L, 6L, 0U, 2U, 4U, 6U
// tmp4 = Elements 1L, 3L, 5L, 7L, 1U, 3U, 5U, 7U
// return Elements 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt16(), op2.AsUInt16());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt16(), op2.AsUInt16());
// var tmp3 = Sse2.UnpackLow(tmp1, tmp2);
// var tmp4 = Sse2.UnpackHigh(tmp1, tmp2);
// return Sse2.UnpackLow(tmp3, tmp4).As<T>();
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
                    op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
                                       nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* tmp1Dup;
tmp1 = impCloneExpr(tmp1, &tmp1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp1 for vector narrow"));
GenTree* tmp2Dup;
tmp2 = impCloneExpr(tmp2, &tmp2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone tmp2 for vector narrow"));
tmp3 = gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp4 = gtNewSimdHWIntrinsicNode(type, tmp1Dup, tmp2Dup, NI_SSE2_UnpackHigh, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp3, tmp4, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
}
case TYP_INT:
case TYP_UINT:
{
// op1 = Elements 0, 1; 0L, 0U, 1L, 1U
// op2 = Elements 2, 3; 2L, 2U, 3L, 3U
//
// tmp1 = Elements 0L, 2L, 0U, 2U
// tmp2 = Elements 1L, 3L, 1U, 3U
// return Elements 0L, 1L, 2L, 3L
//
// var tmp1 = Sse2.UnpackLow(op1.AsUInt32(), op2.AsUInt32());
// var tmp2 = Sse2.UnpackHigh(op1.AsUInt32(), op2.AsUInt32());
// return Sse2.UnpackLow(tmp1, tmp2).As<T>();
CorInfoType opBaseJitType = (simdBaseType == TYP_INT) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, opBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector narrow"));
GenTree* op2Dup;
op2 = impCloneExpr(op2, &op2Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op2 for vector narrow"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op1Dup, op2Dup, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
// op1 = Elements 0, 1
// op2 = Elements 2, 3
//
                // tmp1 = Elements 0, 1, -, -
                // tmp2 = Elements 2, 3, -, -
// return Elements 0, 1, 2, 3
//
// var tmp1 = Sse2.ConvertToVector128Single(op1);
// var tmp2 = Sse2.ConvertToVector128Single(op2);
// return Sse.MoveLowToHigh(tmp1, tmp2);
CorInfoType opBaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(type, op2, NI_SSE2_ConvertToVector128Single, opBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, tmp2, NI_SSE_MoveLowToHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = AdvSimd.Arm64.ConvertToSingleLower(op1);
// return AdvSimd.Arm64.ConvertToSingleUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_Arm64_ConvertToSingleUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = AdvSimd.ExtractNarrowingLower(op1);
// return AdvSimd.ExtractNarrowingUpper(tmp1, op2);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, 8,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, op2, NI_AdvSimd_ExtractNarrowingUpper, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
else if (varTypeIsFloating(simdBaseType))
{
// var tmp1 = op1.ToVector128Unsafe();
// var tmp2 = AdvSimd.InsertScalar(tmp1, op2);
// return AdvSimd.Arm64.ConvertToSingleLower(tmp2);
CorInfoType tmp2BaseJitType = CORINFO_TYPE_DOUBLE;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_Arm64_ConvertToSingleLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
// var tmp1 = op1.ToVector128Unsafe();
        // var tmp2 = AdvSimd.InsertScalar(tmp1.AsUInt64(), 1, op2.AsUInt64()).As<T>();
        //            (signed integers use Int64, unsigned integers use UInt64)
// return AdvSimd.ExtractNarrowingLower(tmp2);
CorInfoType tmp2BaseJitType = varTypeIsSigned(simdBaseType) ? CORINFO_TYPE_LONG : CORINFO_TYPE_ULONG;
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_Vector64_ToVector128Unsafe, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
tmp2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, gtNewIconNode(1), op2, NI_AdvSimd_InsertScalar,
tmp2BaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp2, NI_AdvSimd_ExtractNarrowingLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
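//------------------------------------------------------------------------
// gtNewSimdSqrtNode: Creates a new simd node that computes the square root of each
//    (floating-point) element of op1.
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The vector operand
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created Sqrt node.
//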
GenTree* Compiler::gtNewSimdSqrtNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsFloating(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_Sqrt;
}
else if (simdBaseType == TYP_FLOAT)
{
intrinsic = NI_SSE_Sqrt;
}
else
{
intrinsic = NI_SSE2_Sqrt;
}
#elif defined(TARGET_ARM64)
if ((simdSize == 8) && (simdBaseType == TYP_DOUBLE))
{
intrinsic = NI_AdvSimd_SqrtScalar;
}
else
{
intrinsic = NI_AdvSimd_Arm64_Sqrt;
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
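//------------------------------------------------------------------------
// gtNewSimdSumNode: Creates a new simd node that sums all elements of op1 and yields the
//    scalar result.
//
// Arguments:
//    type                - The scalar type of the result
//    op1                 - The vector operand to sum
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the sum of all elements of op1.
//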
GenTree* Compiler::gtNewSimdSumNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
var_types simdType = getSIMDTypeForSize(simdSize);
assert(varTypeIsSIMD(simdType));
assert(op1 != nullptr);
assert(op1->TypeIs(simdType));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp = nullptr;
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(simdType, simdBaseJitType);
#if defined(TARGET_XARCH)
assert(!varTypeIsByte(simdBaseType) && !varTypeIsLong(simdBaseType));
// HorizontalAdd combines pairs so we need log2(vectorLength) passes to sum all elements together.
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
if (simdSize == 32)
{
        // Minus 1 because for the last pass we split the vector into low/high halves and add them together.
haddCount -= 1;
if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = NI_AVX_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX2));
intrinsic = NI_AVX2_HorizontalAdd;
}
}
else if (varTypeIsFloating(simdBaseType))
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSE3));
intrinsic = NI_SSE3_HorizontalAdd;
}
else
{
assert(compIsaSupportedDebugOnly(InstructionSet_SSSE3));
intrinsic = NI_SSSE3_HorizontalAdd;
}
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
if (simdSize == 32)
{
intrinsic = (simdBaseType == TYP_FLOAT) ? NI_SSE_Add : NI_SSE2_Add;
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, gtNewIconNode(0x01, TYP_INT), NI_AVX_ExtractVector128,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
tmp = gtNewSimdHWIntrinsicNode(simdType, tmp, NI_Vector256_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, tmp, intrinsic, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
{
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_INT:
case TYP_UINT:
{
if (simdSize == 8)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
tmp = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
tmp = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, 16,
isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, tmp, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
case TYP_FLOAT:
{
if (simdSize == 8)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
else
{
unsigned vectorLength = getSIMDVectorLength(simdSize, simdBaseType);
int haddCount = genLog2(vectorLength);
for (int i = 0; i < haddCount; i++)
{
op1 = impCloneExpr(op1, &tmp, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector sum"));
op1 = gtNewSimdHWIntrinsicNode(simdType, op1, tmp, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector128_ToScalar, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
case TYP_DOUBLE:
case TYP_LONG:
case TYP_ULONG:
{
if (simdSize == 16)
{
op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_AdvSimd_Arm64_AddPairwiseScalar, simdBaseJitType,
simdSize, isSimdAsHWIntrinsic);
}
return gtNewSimdHWIntrinsicNode(type, op1, NI_Vector64_ToScalar, simdBaseJitType, 8, isSimdAsHWIntrinsic);
}
default:
{
unreached();
}
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
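//------------------------------------------------------------------------
// gtNewSimdUnOpNode: Creates a new simd node for the given unary operation (GT_NEG or GT_NOT)
//    applied to each element of op1.
//
// Arguments:
//    op                  - The unary operation to perform (GT_NEG or GT_NOT)
//    type                - The simd type of the node
//    op1                 - The vector operand
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the unary operation.
//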
GenTree* Compiler::gtNewSimdUnOpNode(genTreeOps op,
var_types type,
GenTree* op1,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* op2 = nullptr;
switch (op)
{
#if defined(TARGET_XARCH)
case GT_NEG:
{
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(varTypeIsFloating(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
}
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// Zero - op1
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
case GT_NOT:
{
assert((simdSize != 32) || compIsaSupportedDebugOnly(InstructionSet_AVX));
intrinsic = (simdSize == 32) ? NI_Vector256_get_AllBitsSet : NI_Vector128_get_AllBitsSet;
op2 = gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
// op1 ^ AllBitsSet
return gtNewSimdBinOpNode(GT_XOR, type, op1, op2, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
case GT_NEG:
{
if (varTypeIsSigned(simdBaseType))
{
if (simdBaseType == TYP_LONG)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else if (simdBaseType == TYP_DOUBLE)
{
intrinsic = (simdSize == 8) ? NI_AdvSimd_NegateScalar : NI_AdvSimd_Arm64_Negate;
}
else
{
intrinsic = NI_AdvSimd_Negate;
}
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
// Zero - op1
op2 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
return gtNewSimdBinOpNode(GT_SUB, type, op2, op1, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
}
case GT_NOT:
{
return gtNewSimdHWIntrinsicNode(type, op1, NI_AdvSimd_Not, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
default:
{
unreached();
}
}
}
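//------------------------------------------------------------------------
// gtNewSimdWidenLowerNode: Creates a new simd node that widens the lower half of the elements
//    of op1 into a vector whose elements are twice the original width.
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The vector operand to widen
//    simdBaseJitType     - The base jit type of the source simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the widened lower half of op1.
//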
GenTree* Compiler::gtNewSimdWidenLowerNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 =
gtNewSimdHWIntrinsicNode(type, op1, NI_Vector256_GetLower, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if ((simdBaseType == TYP_FLOAT) || compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_SSE2_ConvertToVector128Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen lower"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
if (simdSize == 16)
{
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
tmp1 = op1;
}
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, 8, isSimdAsHWIntrinsic);
if (simdSize == 8)
{
tmp1 = gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, 16, isSimdAsHWIntrinsic);
}
return tmp1;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
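//------------------------------------------------------------------------
// gtNewSimdWidenUpperNode: Creates a new simd node that widens the upper half of the elements
//    of op1 into a vector whose elements are twice the original width.
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The vector operand to widen
//    simdBaseJitType     - The base jit type of the source simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The tree that computes the widened upper half of op1.
//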
GenTree* Compiler::gtNewSimdWidenUpperNode(
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
assert(op1 != nullptr);
assert(op1->TypeIs(type));
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType) && !varTypeIsLong(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
GenTree* tmp1;
#if defined(TARGET_XARCH)
if (simdSize == 32)
{
assert(compIsaSupportedDebugOnly(InstructionSet_AVX));
assert(!varTypeIsIntegral(simdBaseType) || compIsaSupportedDebugOnly(InstructionSet_AVX2));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(1), NI_AVX_ExtractVector128, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_AVX2_ConvertToVector256Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_AVX2_ConvertToVector256Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_AVX2_ConvertToVector256Int64;
break;
}
case TYP_FLOAT:
{
intrinsic = NI_AVX_ConvertToVector256Double;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, op1Dup, NI_SSE_MoveHighToLow, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_SSE2_ConvertToVector128Double, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
else if (compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, gtNewIconNode(8), NI_SSE2_ShiftRightLogical128BitLane,
simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
intrinsic = NI_SSE41_ConvertToVector128Int16;
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
intrinsic = NI_SSE41_ConvertToVector128Int32;
break;
}
case TYP_INT:
case TYP_UINT:
{
intrinsic = NI_SSE41_ConvertToVector128Int64;
break;
}
default:
{
unreached();
}
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, tmp1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
tmp1 = gtNewSimdZeroNode(type, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
if (varTypeIsSigned(simdBaseType))
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleForSIMD(type, simdBaseJitType);
GenTree* op1Dup;
op1 = impCloneExpr(op1, &op1Dup, clsHnd, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("Clone op1 for vector widen upper"));
tmp1 = gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_CompareLessThan, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
op1 = op1Dup;
}
return gtNewSimdHWIntrinsicNode(type, op1, tmp1, NI_SSE2_UnpackHigh, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#elif defined(TARGET_ARM64)
GenTree* zero;
if (simdSize == 16)
{
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDoubleUpper;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningUpper;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningUpper;
}
assert(intrinsic != NI_Illegal);
return gtNewSimdHWIntrinsicNode(type, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
else
{
assert(simdSize == 8);
ssize_t index = 8 / genTypeSize(simdBaseType);
if (varTypeIsFloating(simdBaseType))
{
assert(simdBaseType == TYP_FLOAT);
intrinsic = NI_AdvSimd_Arm64_ConvertToDouble;
}
else if (varTypeIsSigned(simdBaseType))
{
intrinsic = NI_AdvSimd_SignExtendWideningLower;
}
else
{
intrinsic = NI_AdvSimd_ZeroExtendWideningLower;
}
assert(intrinsic != NI_Illegal);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
zero = gtNewSimdZeroNode(TYP_SIMD16, simdBaseJitType, 16, isSimdAsHWIntrinsic);
tmp1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, tmp1, zero, gtNewIconNode(index), NI_AdvSimd_ExtractVector128,
simdBaseJitType, 16, isSimdAsHWIntrinsic);
return gtNewSimdHWIntrinsicNode(type, tmp1, NI_Vector128_GetLower, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
}
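//------------------------------------------------------------------------
// gtNewSimdWithElementNode: Creates a new simd node that yields op1 with the element at the
//    constant index op2 replaced by the value op3.
//
// Arguments:
//    type                - The simd type of the node
//    op1                 - The vector operand
//    op2                 - The (constant, in-range) index of the element to set
//    op3                 - The value to insert
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created WithElement (or AdvSimd Insert) node.
//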
GenTree* Compiler::gtNewSimdWithElementNode(var_types type,
GenTree* op1,
GenTree* op2,
GenTree* op3,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
NamedIntrinsic hwIntrinsicID = NI_Vector128_WithElement;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
assert(op2->IsCnsIntOrI());
ssize_t imm8 = op2->AsIntCon()->IconValue();
ssize_t count = simdSize / genTypeSize(simdBaseType);
assert((0 <= imm8) && (imm8 < count));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
// Using software fallback if simdBaseType is not supported by hardware
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41));
break;
case TYP_LONG:
case TYP_ULONG:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE41_X64));
break;
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
break;
default:
unreached();
}
if (simdSize == 32)
{
hwIntrinsicID = NI_Vector256_WithElement;
}
#elif defined(TARGET_ARM64)
switch (simdBaseType)
{
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
if (simdSize == 8)
{
return gtNewSimdHWIntrinsicNode(type, op3, NI_Vector64_Create, simdBaseJitType, simdSize,
isSimdAsHWIntrinsic);
}
break;
case TYP_FLOAT:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
break;
default:
unreached();
}
hwIntrinsicID = NI_AdvSimd_Insert;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
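//------------------------------------------------------------------------
// gtNewSimdZeroNode: Creates a new simd node representing a zero vector of the given type.
//
// Arguments:
//    type                - The simd type of the node
//    simdBaseJitType     - The base jit type of the simd elements
//    simdSize            - The simd size, in bytes
//    isSimdAsHWIntrinsic - true if this is a SimdAsHWIntrinsic node; otherwise, false
//
// Return Value:
//    The created get_Zero node.
//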
GenTree* Compiler::gtNewSimdZeroNode(var_types type,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
{
assert(IsBaselineSimdIsaSupportedDebugOnly());
assert(varTypeIsSIMD(type));
assert(getSIMDTypeForSize(simdSize) == type);
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(varTypeIsArithmetic(simdBaseType));
NamedIntrinsic intrinsic = NI_Illegal;
#if defined(TARGET_XARCH)
intrinsic = (simdSize == 32) ? NI_Vector256_get_Zero : NI_Vector128_get_Zero;
#elif defined(TARGET_ARM64)
intrinsic = (simdSize > 8) ? NI_Vector128_get_Zero : NI_Vector64_get_Zero;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64
return gtNewSimdHWIntrinsicNode(type, intrinsic, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);
}
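//------------------------------------------------------------------------
// gtNewScalarHWIntrinsicNode: Creates a new hardware intrinsic node for an intrinsic that does
//    not operate on SIMD values (no simd base type and a simd size of zero). The overloads
//    below differ only in the number of operands they take.
//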
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID)
{
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
return new (this, GT_HWINTRINSIC) GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID,
CORINFO_TYPE_UNDEF, 0, /* isSimdAsHWIntrinsic */ false, op1);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(var_types type,
GenTree* op1,
GenTree* op2,
NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2);
}
GenTreeHWIntrinsic* Compiler::gtNewScalarHWIntrinsicNode(
var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID)
{
SetOpLclRelatedToSIMDIntrinsic(op1);
SetOpLclRelatedToSIMDIntrinsic(op2);
SetOpLclRelatedToSIMDIntrinsic(op3);
return new (this, GT_HWINTRINSIC)
GenTreeHWIntrinsic(type, getAllocator(CMK_ASTNode), hwIntrinsicID, CORINFO_TYPE_UNDEF, 0,
/* isSimdAsHWIntrinsic */ false, op1, op2, op3);
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoad() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
NamedIntrinsic intrinsicId = GetHWIntrinsicId();
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(intrinsicId);
if (category == HW_Category_MemoryLoad)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryLoad(GetHWIntrinsicId()))
{
// Some intrinsics (without HW_Category_MemoryLoad) also have MemoryLoad semantics
// This is generally because they have both vector and pointer overloads, e.g.,
// * Vector128<byte> BroadcastScalarToVector128(Vector128<byte> value)
// * Vector128<byte> BroadcastScalarToVector128(byte* source)
        // So, we need to check whether the argument's type is a memory reference or a Vector128
if ((category == HW_Category_SimpleSIMD) || (category == HW_Category_SIMDScalar))
{
assert(GetOperandCount() == 1);
switch (intrinsicId)
{
case NI_SSE41_ConvertToVector128Int16:
case NI_SSE41_ConvertToVector128Int32:
case NI_SSE41_ConvertToVector128Int64:
case NI_AVX2_BroadcastScalarToVector128:
case NI_AVX2_BroadcastScalarToVector256:
case NI_AVX2_ConvertToVector256Int16:
case NI_AVX2_ConvertToVector256Int32:
case NI_AVX2_ConvertToVector256Int64:
{
CorInfoType auxiliaryType = GetAuxiliaryJitType();
if (auxiliaryType == CORINFO_TYPE_PTR)
{
return true;
}
assert(auxiliaryType == CORINFO_TYPE_UNDEF);
return false;
}
default:
{
unreached();
}
}
}
else if (category == HW_Category_IMM)
{
            // Do we have fewer than 3 operands?
if (GetOperandCount() < 3)
{
return false;
}
else if (HWIntrinsicInfo::isAVX2GatherIntrinsic(GetHWIntrinsicId()))
{
return true;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
HWIntrinsicCategory category = HWIntrinsicInfo::lookupCategory(GetHWIntrinsicId());
if (category == HW_Category_MemoryStore)
{
return true;
}
#ifdef TARGET_XARCH
else if (HWIntrinsicInfo::MaybeMemoryStore(GetHWIntrinsicId()) &&
(category == HW_Category_IMM || category == HW_Category_Scalar))
{
// Some intrinsics (without HW_Category_MemoryStore) also have MemoryStore semantics
        // Bmi2/Bmi2.X64.MultiplyNoFlags may return the lower half of the result via an out argument
// unsafe ulong MultiplyNoFlags(ulong left, ulong right, ulong* low)
//
// So, the 3-argument form is MemoryStore
if (GetOperandCount() == 3)
{
switch (GetHWIntrinsicId())
{
case NI_BMI2_MultiplyNoFlags:
case NI_BMI2_X64_MultiplyNoFlags:
return true;
default:
return false;
}
}
}
#endif // TARGET_XARCH
#endif // TARGET_XARCH || TARGET_ARM64
return false;
}
// Returns true for the HW Intrinsic instructions that have MemoryLoad or MemoryStore semantics, false otherwise
bool GenTreeHWIntrinsic::OperIsMemoryLoadOrStore() const
{
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
return OperIsMemoryLoad() || OperIsMemoryStore();
#else
return false;
#endif
}
NamedIntrinsic GenTreeHWIntrinsic::GetHWIntrinsicId() const
{
NamedIntrinsic id = gtHWIntrinsicId;
int numArgs = HWIntrinsicInfo::lookupNumArgs(id);
bool numArgsUnknown = numArgs < 0;
assert((static_cast<size_t>(numArgs) == GetOperandCount()) || numArgsUnknown);
return id;
}
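//------------------------------------------------------------------------
// SetHWIntrinsicId: Changes the intrinsic id of this node, asserting (in DEBUG) that the new
//    intrinsic's expected argument count matches the node's current operand count (when known).
//
// Arguments:
//    intrinsicId - The new hardware intrinsic id
//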
void GenTreeHWIntrinsic::SetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
#ifdef DEBUG
size_t oldOperandCount = GetOperandCount();
int newOperandCount = HWIntrinsicInfo::lookupNumArgs(intrinsicId);
bool newCountUnknown = newOperandCount < 0;
// We'll choose to trust the programmer here.
assert((oldOperandCount == static_cast<size_t>(newOperandCount)) || newCountUnknown);
#endif // DEBUG
gtHWIntrinsicId = intrinsicId;
}
// TODO-Review: why are layouts not compared here?
/* static */ bool GenTreeHWIntrinsic::Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2)
{
return (op1->TypeGet() == op2->TypeGet()) && (op1->GetHWIntrinsicId() == op2->GetHWIntrinsicId()) &&
(op1->GetSimdBaseType() == op2->GetSimdBaseType()) && (op1->GetSimdSize() == op2->GetSimdSize()) &&
(op1->GetAuxiliaryType() == op2->GetAuxiliaryType()) && (op1->GetOtherReg() == op2->GetOtherReg()) &&
OperandsAreEqual(op1, op2);
}
#endif // FEATURE_HW_INTRINSICS
//---------------------------------------------------------------------------------------
// gtNewMustThrowException:
// create a throw node (calling into JIT helper) that must be thrown.
// The result would be a comma node: COMMA(jithelperthrow(void), x) where x's type should be specified.
//
// Arguments
// helper - JIT helper ID
// type - return type of the node
//
// Return Value
// pointer to the throw node
//
GenTree* Compiler::gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd)
{
GenTreeCall* node = gtNewHelperCallNode(helper, TYP_VOID);
node->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
if (type != TYP_VOID)
{
unsigned dummyTemp = lvaGrabTemp(true DEBUGARG("dummy temp of must thrown exception"));
if (type == TYP_STRUCT)
{
lvaSetStruct(dummyTemp, clsHnd, false);
type = lvaTable[dummyTemp].lvType; // struct type is normalized
}
else
{
lvaTable[dummyTemp].lvType = type;
}
GenTree* dummyNode = gtNewLclvNode(dummyTemp, type);
return gtNewOperNode(GT_COMMA, type, node, dummyNode);
}
return node;
}
//---------------------------------------------------------------------------------------
// InitializeStructReturnType:
// Initialize the Return Type Descriptor for a method that returns a struct type
//
// Arguments
// comp - Compiler Instance
// retClsHnd - VM handle to the struct type returned by the method
//
// Return Value
// None
//
void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
CORINFO_CLASS_HANDLE retClsHnd,
CorInfoCallConvExtension callConv)
{
assert(!m_inited);
#if FEATURE_MULTIREG_RET
assert(retClsHnd != NO_CLASS_HANDLE);
unsigned structSize = comp->info.compCompHnd->getClassSize(retClsHnd);
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, callConv, &howToReturnStruct, structSize);
switch (howToReturnStruct)
{
case Compiler::SPK_EnclosingType:
m_isEnclosingType = true;
FALLTHROUGH;
case Compiler::SPK_PrimitiveType:
{
assert(returnType != TYP_UNKNOWN);
assert(returnType != TYP_STRUCT);
m_regType[0] = returnType;
break;
}
case Compiler::SPK_ByValueAsHfa:
{
assert(varTypeIsStruct(returnType));
var_types hfaType = comp->GetHfaType(retClsHnd);
// We should have an hfa struct type
assert(varTypeIsValidHfaType(hfaType));
            // Note that the retail build issues a warning about a potential division by zero without this Max function
unsigned elemSize = Max((unsigned)1, EA_SIZE_IN_BYTES(emitActualTypeSize(hfaType)));
// The size of this struct should be evenly divisible by elemSize
assert((structSize % elemSize) == 0);
unsigned hfaCount = (structSize / elemSize);
for (unsigned i = 0; i < hfaCount; ++i)
{
m_regType[i] = hfaType;
}
if (comp->compFloatingPointUsed == false)
{
comp->compFloatingPointUsed = true;
}
break;
}
case Compiler::SPK_ByValue:
{
assert(varTypeIsStruct(returnType));
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
comp->eeGetSystemVAmd64PassStructInRegisterDescriptor(retClsHnd, &structDesc);
assert(structDesc.passedInRegisters);
for (int i = 0; i < structDesc.eightByteCount; i++)
{
assert(i < MAX_RET_REG_COUNT);
m_regType[i] = comp->GetEightByteType(structDesc, i);
}
#elif defined(TARGET_ARM64)
// a non-HFA struct returned using two registers
//
assert((structSize > TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#elif defined(TARGET_X86)
// an 8-byte struct returned using two registers
assert(structSize == 8);
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
for (unsigned i = 0; i < 2; ++i)
{
m_regType[i] = comp->getJitGCType(gcPtrs[i]);
}
#else // TARGET_XXX
// This target needs support here!
//
NYI("Unsupported TARGET returning a TYP_STRUCT in InitializeStructReturnType");
#endif // UNIX_AMD64_ABI
break; // for case SPK_ByValue
}
case Compiler::SPK_ByReference:
// We are returning using the return buffer argument
// There are no return registers
break;
default:
unreached(); // By the contract of getReturnTypeForStruct we should never get here.
} // end of switch (howToReturnStruct)
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
m_inited = true;
#endif
}
//---------------------------------------------------------------------------------------
// InitializeLongReturnType:
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
//
void ReturnTypeDesc::InitializeLongReturnType()
{
assert(!m_inited);
#if defined(TARGET_X86) || defined(TARGET_ARM)
    // Sets up a ReturnTypeDesc for returning a long using two registers
//
assert(MAX_RET_REG_COUNT >= 2);
m_regType[0] = TYP_INT;
m_regType[1] = TYP_INT;
#else // not (TARGET_X86 or TARGET_ARM)
m_regType[0] = TYP_LONG;
#endif // TARGET_X86 or TARGET_ARM
#ifdef DEBUG
m_inited = true;
#endif
}
//-------------------------------------------------------------------
// GetABIReturnReg: Return i'th return register as per target ABI
//
// Arguments:
// idx - Index of the return register.
// The first return register has an index of 0 and so on.
//
// Return Value:
// Returns i'th return register as per target ABI.
//
// Notes:
// x86 and ARM return long in multiple registers.
// ARM and ARM64 return HFA struct in multiple registers.
//
regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx) const
{
unsigned count = GetReturnRegCount();
assert(idx < count);
regNumber resultReg = REG_NA;
#ifdef UNIX_AMD64_ABI
var_types regType0 = GetReturnRegType(0);
if (idx == 0)
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET;
}
else
{
noway_assert(varTypeUsesFloatReg(regType0));
resultReg = REG_FLOATRET;
}
}
else if (idx == 1)
{
var_types regType1 = GetReturnRegType(1);
if (varTypeIsIntegralOrI(regType1))
{
if (varTypeIsIntegralOrI(regType0))
{
resultReg = REG_INTRET_1;
}
else
{
resultReg = REG_INTRET;
}
}
else
{
noway_assert(varTypeUsesFloatReg(regType1));
if (varTypeUsesFloatReg(regType0))
{
resultReg = REG_FLOATRET_1;
}
else
{
resultReg = REG_FLOATRET;
}
}
}
#elif defined(TARGET_X86)
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
#elif defined(TARGET_ARM)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
// Ints are returned in one return register.
// Longs are returned in two return registers.
if (idx == 0)
{
resultReg = REG_LNGRET_LO;
}
else if (idx == 1)
{
resultReg = REG_LNGRET_HI;
}
}
else
{
// Floats are returned in one return register (f0).
// Doubles are returned in one return register (d0).
        // HFA structs are returned in up to four registers.
assert(idx < MAX_RET_REG_COUNT); // Up to 4 return registers for HFA's
if (regType == TYP_DOUBLE)
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx * 2); // d0, d1, d2 or d3
}
else
{
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // f0, f1, f2 or f3
}
}
#elif defined(TARGET_ARM64)
var_types regType = GetReturnRegType(idx);
if (varTypeIsIntegralOrI(regType))
{
noway_assert(idx < 2); // Up to 2 return registers for 16-byte structs
resultReg = (idx == 0) ? REG_INTRET : REG_INTRET_1; // X0 or X1
}
else
{
noway_assert(idx < 4); // Up to 4 return registers for HFA's
resultReg = (regNumber)((unsigned)(REG_FLOATRET) + idx); // V0, V1, V2 or V3
}
#endif // TARGET_XXX
assert(resultReg != REG_NA);
return resultReg;
}
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
// Arguments:
// None
//
// Return Value:
// reg mask of return registers in which the return type is returned.
//
// Note:
// This routine can be used when the caller is not particular about the order
// of return registers and wants to know the set of return registers.
//
// static
regMaskTP ReturnTypeDesc::GetABIReturnRegs() const
{
regMaskTP resultMask = RBM_NONE;
unsigned count = GetReturnRegCount();
for (unsigned i = 0; i < count; ++i)
{
resultMask |= genRegMask(GetABIReturnReg(i));
}
return resultMask;
}
//------------------------------------------------------------------------
// The following functions manage the gtRsvdRegs set of temporary registers
// created by LSRA during code generation.
//------------------------------------------------------------------------
// AvailableTempRegCount: return the number of available temporary registers in the (optional) given set
// (typically, RBM_ALLINT or RBM_ALLFLOAT).
//
// Arguments:
// mask - (optional) Check for available temporary registers only in this set.
//
// Return Value:
// Count of available temporary registers in given set.
//
unsigned GenTree::AvailableTempRegCount(regMaskTP mask /* = (regMaskTP)-1 */) const
{
return genCountBits(gtRsvdRegs & mask);
}
//------------------------------------------------------------------------
// GetSingleTempReg: There is expected to be exactly one available temporary register
// in the given mask in the gtRsvdRegs set. Get that register. No future calls to get
// a temporary register are expected. Removes the register from the set, but only in
// DEBUG to avoid doing unnecessary work in non-DEBUG builds.
//
// Arguments:
// mask - (optional) Get an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::GetSingleTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) == 1);
regNumber tempReg = genRegNumFromMask(availableSet);
INDEBUG(gtRsvdRegs &= ~availableSet;) // Remove the register from the set, so it can't be used again.
return tempReg;
}
//------------------------------------------------------------------------
// ExtractTempReg: Find the lowest number temporary register from the gtRsvdRegs set
// that is also in the optional given mask (typically, RBM_ALLINT or RBM_ALLFLOAT),
// and return it. Remove this register from the temporary register set, so it won't
// be returned again.
//
// Arguments:
// mask - (optional) Extract an available temporary register only in this set.
//
// Return Value:
// Available temporary register in given mask.
//
regNumber GenTree::ExtractTempReg(regMaskTP mask /* = (regMaskTP)-1 */)
{
regMaskTP availableSet = gtRsvdRegs & mask;
assert(genCountBits(availableSet) >= 1);
regMaskTP tempRegMask = genFindLowestBit(availableSet);
gtRsvdRegs &= ~tempRegMask;
return genRegNumFromMask(tempRegMask);
}
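// Illustrative sketch (not part of the original source): typical codegen use of the
// LSRA-reserved temporaries on a node "tree" (the variable names are hypothetical):
//
// assert(tree->AvailableTempRegCount(RBM_ALLINT) >= 1);
// regNumber intTmp = tree->ExtractTempReg(RBM_ALLINT); // removes it from gtRsvdRegs
// regNumber fltTmp = tree->GetSingleTempReg(RBM_ALLFLOAT); // expects exactly one float temp left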
//------------------------------------------------------------------------
// GetLclOffs: if `this` is a field or a field address, it returns the offset
// of the field inside the struct; for non-field nodes it returns 0.
//
// Return Value:
// The offset value.
//
uint16_t GenTreeLclVarCommon::GetLclOffs() const
{
if (OperIsLocalField())
{
return AsLclFld()->GetLclOffs();
}
else
{
return 0;
}
}
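// Illustrative sketch (not part of the original source): for a GT_LCL_FLD that reads the
// field at offset 8 of a local struct, GetLclOffs() returns 8; for a whole-variable node
// such as GT_LCL_VAR it returns 0.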
//------------------------------------------------------------------------
// GetFieldSeq: Get the field sequence for this local node.
//
// Return Value:
// The sequence of the node for local fields, empty ("nullptr") otherwise.
//
FieldSeqNode* GenTreeLclVarCommon::GetFieldSeq() const
{
return OperIsLocalField() ? AsLclFld()->GetFieldSeq() : nullptr;
}
#if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// GetResultOpNumForFMA: check if the result is written into one of the operands.
// In the case that none of the operands is overwritten, check if any of them is a last use.
//
// Return Value:
// The number of the operand that is overwritten, or that is a last use. The default value 0 means the result is
// written into a destination that is not one of the source operands and there is no last-use operand.
//
unsigned GenTreeHWIntrinsic::GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3)
{
// only FMA intrinsic node should call into this function
assert(HWIntrinsicInfo::lookupIsa(gtHWIntrinsicId) == InstructionSet_FMA);
if (use != nullptr && use->OperIs(GT_STORE_LCL_VAR))
{
// For store_lcl_var, check if any op is overwritten
GenTreeLclVarCommon* overwritten = use->AsLclVarCommon();
unsigned overwrittenLclNum = overwritten->GetLclNum();
if (op1->IsLocal() && op1->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 1;
}
else if (op2->IsLocal() && op2->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 2;
}
else if (op3->IsLocal() && op3->AsLclVarCommon()->GetLclNum() == overwrittenLclNum)
{
return 3;
}
}
// If no overwritten op, check if there is any last use op
// https://github.com/dotnet/runtime/issues/62215
if (op1->OperIs(GT_LCL_VAR) && op1->IsLastUse(0))
return 1;
else if (op2->OperIs(GT_LCL_VAR) && op2->IsLastUse(0))
return 2;
else if (op3->OperIs(GT_LCL_VAR) && op3->IsLastUse(0))
return 3;
return 0;
}
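// Illustrative sketch (not part of the original source): for an accumulating FMA such as
//
// lclC = FMA(lclA, lclB, lclC) // stored back into lclC via GT_STORE_LCL_VAR
//
// GetResultOpNumForFMA returns 3, letting codegen pick the FMA encoding whose destination
// is the register that already holds lclC.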
#endif // TARGET_XARCH && FEATURE_HW_INTRINSICS
#ifdef TARGET_ARM
//------------------------------------------------------------------------
// IsOffsetMisaligned: check if the field needs special handling on arm.
//
// Return Value:
// true if it is a float field with a misaligned offset, false otherwise.
//
bool GenTreeLclFld::IsOffsetMisaligned() const
{
if (varTypeIsFloating(gtType))
{
return ((m_lclOffs % emitTypeSize(TYP_FLOAT)) != 0);
}
return false;
}
#endif // TARGET_ARM
bool GenTree::IsInvariant() const
{
return OperIsConst() || Compiler::impIsAddressInLocal(this);
}
//------------------------------------------------------------------------
// IsNeverNegative: returns true if the given tree is known to be never
// negative, i.e. the upper bit will always be zero.
// Only valid for integral types.
//
// Arguments:
// comp - Compiler object, needed for IntegralRange::ForNode
//
// Return Value:
// true if the given tree is known to be never negative
//
bool GenTree::IsNeverNegative(Compiler* comp) const
{
assert(varTypeIsIntegral(this));
if (IsIntegralConst())
{
return AsIntConCommon()->IntegralValue() >= 0;
}
// TODO-Casts: extend IntegralRange to handle constants
return IntegralRange::ForNode((GenTree*)this, comp).IsPositive();
}
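// Illustrative sketch (not part of the original source): IsNeverNegative returns true for
// a GT_CNS_INT of 5 and false for a GT_CNS_INT of -1; for non-constants it defers to
// IntegralRange::ForNode, so e.g. a node whose computed range is [0..255] also reports true.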
| 1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/jit/gentree.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
* The values are such that they don't overlap with helper's or user function's handle.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
class FieldSeqNode
{
public:
enum class FieldKind : uintptr_t
{
Instance = 0, // An instance field, object or struct.
SimpleStatic = 1, // Simple static field - the handle represents a unique location.
SharedStatic = 2, // Static field on a shared generic type: "Class<__Canon>.StaticField".
};
private:
static const uintptr_t FIELD_KIND_MASK = 0b11;
static_assert_no_msg(sizeof(CORINFO_FIELD_HANDLE) == sizeof(uintptr_t));
uintptr_t m_fieldHandleAndKind;
FieldSeqNode* m_next;
public:
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind);
FieldKind GetKind() const
{
return static_cast<FieldKind>(m_fieldHandleAndKind & FIELD_KIND_MASK);
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(!IsPseudoField() && (GetFieldHandleValue() != NO_FIELD_HANDLE));
return GetFieldHandleValue();
}
CORINFO_FIELD_HANDLE GetFieldHandleValue() const
{
return CORINFO_FIELD_HANDLE(m_fieldHandleAndKind & ~FIELD_KIND_MASK);
}
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq() const;
// returns true when this is the pseudo #ConstantIndex field sequence
bool IsConstantIndexFieldSeq() const;
// returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
bool IsPseudoField() const;
bool IsStaticField() const
{
return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic);
}
bool IsSharedStaticField() const
{
return GetKind() == FieldKind::SharedStatic;
}
FieldSeqNode* GetNext() const
{
return m_next;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable.
// Note that there is a one-to-one relationship between the field handle and the field kind, so
// we do not need to mask away the latter for comparison purposes.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(fsn.m_fieldHandleAndKind) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHandleAndKind == fsn2.m_fieldHandleAndKind && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
static int ConstantIndexPseudoFieldStruct;
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd,
FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
// We have a few "pseudo" field handles:
// This treats the constant offset of the first element of something as if it were a field.
// Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
static CORINFO_FIELD_HANDLE FirstElemPseudoField;
// If there is a constant index, we make a pseudo field to correspond to the constant added to
// offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
// case where the element type is a struct, so we might add a further struct field offset.
static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
{
return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
}
};
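// Illustrative sketch (not part of the original source): an access like "obj.FieldB.FieldC"
// would be annotated with a sequence built roughly as
//
// FieldSeqNode* seqB = fieldSeqStore->CreateSingleton(fieldBHnd);
// FieldSeqNode* seqC = fieldSeqStore->CreateSingleton(fieldCHnd);
// FieldSeqNode* seq = fieldSeqStore->Append(seqB, seqC); // [FieldB, FieldC]
//
// where "fieldSeqStore", "fieldBHnd" and "fieldCHnd" are hypothetical names for the
// compiler's store and the field handles involved. Appending FieldSeqStore::NotAField()
// on either side yields NotAField().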
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
GTF_VAR_CAST = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be type of local)
GTF_VAR_ITERATOR = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
GTF_VAR_ARR_INDEX = 0x00000020, // The variable is part of (the index portion of) an array index expression.
// Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
GTF_IND_VOLATILE = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_ARR_INDEX = 0x00800000, // GT_IND -- the indirection represents an (SZ) array index
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_ARR_INDEX | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeqNode* (used only as VNHandle)
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proved this check is always in-bounds
GTF_ARRLEN_ARR_IDX = 0x80000000, // GT_ARR_LENGTH -- Length which feeds into an array index expression
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
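// Illustrative sketch (not part of the original source): the operator overloads above let
// flags be combined and tested without casts, e.g.
//
// tree->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
// bool hasEffects = (tree->gtFlags & GTF_ALL_EFFECT) != GTF_EMPTY;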
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables cannot.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
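// Illustrative sketch (not part of the original source): a typical query on a node whose
// flags have already been computed:
//
// if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(tree->gtFlags))
// {
// // the subtree may call, throw, or write global state, so it must be preserved
// }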
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper)
{
return (firstOper + 1) == secondOper;
}
template <typename... Opers>
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper, Opers... otherOpers)
{
return OpersAreContiguous(firstOper, secondOper) && OpersAreContiguous(secondOper, otherOpers...);
}
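// Illustrative sketch (not part of the original source): these helpers back compile-time
// checks such as
//
// static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT));
//
// which guard the range-based OperIsXxx predicates below against reorderings of gtlist.h.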
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GTSTRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
// Optimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
regMaskTP gtGetContainedRegMask();
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
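// Illustrative sketch (not part of the original source): the variadic overloads above allow
// compact multi-oper / multi-type checks, e.g.
//
// if (tree->OperIs(GT_ADD, GT_SUB) && tree->TypeIs(TYP_INT, TYP_LONG)) { ... }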
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(
OpersAreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Return if binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Return if binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
    // Returns true for nodes that perform an indirection through an address: GT_IND and GT_STOREIND
    // (single-register loads and stores), the block forms (GT_BLK, etc.), and GT_NULLCHECK, which
    // dereferences an address only for its null-check side effect.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
bool OperIsSsaDef() const
{
return OperIs(GT_ASG, GT_CALL);
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
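    // Change the type of this node, propagating the new type down through the value operands of any
    // nested GT_COMMA chain so that the COMMA nodes' types stay consistent with the new type.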
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
// Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some
// element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
void ParseArrayAddress(
Compiler* comp, struct ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
void ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
// within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq
// to the sequence of struct field accesses.
bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of a (possible) array element (or struct field within that).
// If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
// accessed within the array element, and returns true. If not, returns "false".
bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
// returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
// Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
    // an affine expression, to the value of the index.
void LabelIndex(Compiler* comp, bool isConst = true);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
    void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
    {
        SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntegralConstUnsignedPow2() const;
inline bool IsIntegralConstAbsPow2() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
enum class VisitResult
{
Abort = false,
Continue = true
};
    // Visits each operand of this node. The visitor must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
    //     node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
    // This function is generally more efficient than the operand iterator and should be preferred over that API for
    // hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
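// For example (an illustrative sketch, not tied to a particular ABI), a struct { int a; double b; }
// passed as two pieces could be represented as a FIELD_LIST with two uses: one for field "a"
// (offset 0, TYP_INT) and one for field "b" (offset 8, TYP_DOUBLE).
//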
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
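// A minimal usage sketch (assuming "tree" is some GenTree* in scope):
//
//     for (GenTree** use : tree->UseEdges())
//     {
//         GenTree* operand = *use;
//         // ... inspect "operand", or rewrite the edge by assigning through "*use"
//     }
//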
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
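//
// A minimal usage sketch (assuming "tree" is some GenTree* in scope):
//
//     for (GenTree* operand : tree->Operands())
//     {
//         // ... "operand" is each child of "tree", produced in execution order
//     }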
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
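// For example, a unary GT_NEG node is allocated as a GenTreeOp whose gtOp2 is nullptr; the assert in
// the GenTreeOp constructor below enforces that unary opers always receive a null second operand.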
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of types are unary operators with optional arguments. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization this node
// then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
    // whereas gtSrcReg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
     * It's used to hold both int constants and pointer handle constants.
     * For the 64-bit targets we will only use GT_CNS_INT as it is used to represent all the possible sizes
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address, holds the method handle to that target which is used
// to fetch target method name and display in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
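// A usage sketch for the constant-folding scenario described above ("cns1" and "cns2" are
// illustrative GenTreeIntCon nodes, not names defined in this file):
//
//     ssize_t folded = cns1->IconValue() + cns2->IconValue(); // 64-bit arithmetic on a 64-bit host
//     cns1->SetValueTruncating(folded); // truncated to int32 unless "cns1" is TYP_LONG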
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(>DconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
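// For example, a MultiRegSpillFlags value of 0x9 (binary 1001) decodes as:
//   - register 0 (bits 0-1 == 01): GTF_SPILL
//   - register 1 (bits 2-3 == 10): GTF_SPILLED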
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
    // ClearOtherRegFlags: clear the GTF_* spill flags recorded for this node's result registers
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
    //    from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
// GenTreeCast - conversion to a different type (GT_CAST).
//
// This node represents all "conv[.ovf].{type}[.un]" IL opcodes.
//
// There are four semantically significant values that determine what it does:
//
// 1) "genActualType(CastOp())" - the type being cast from.
// 2) "gtCastType" - the type being cast to.
// 3) "IsUnsigned" (the "GTF_UNSIGNED" flag) - whether the cast is "unsigned".
// 4) "gtOverflow" (the "GTF_OVERFLOW" flag) - whether the cast is checked.
//
// Different "kinds" of casts use these values differently; not all are always
// meaningful or legal:
//
// 1) For casts from FP types, "IsUnsigned" will always be "false".
// 2) Checked casts use "IsUnsigned" to represent the fact the type being cast
// from is unsigned. The target type's signedness is similarly significant.
// 3) For unchecked casts, "IsUnsigned" is significant for "int -> long", where
// it decides whether the cast sign- or zero-extends its source, and "integer
// -> FP" cases. For all other unchecked casts, "IsUnsigned" is meaningless.
// 4) For unchecked casts, signedness of the target type is only meaningful if
// the cast is to an FP or small type. In the latter case (and everywhere
// else in IR) it decides whether the value will be sign- or zero-extended.
//
// For additional context on "GT_CAST"'s semantics, see "IntegralRange::ForCast"
// methods and "GenIntCastDesc"'s constructor.
//
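// As an illustrative example, the IL opcode "conv.ovf.i4.un" applied to a TYP_INT value is typically
// imported as a GT_CAST with "gtCastType" == TYP_INT and both GTF_UNSIGNED and GTF_OVERFLOW set:
// the source is treated as unsigned and the conversion is range-checked.
//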
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
    // Note that this is an address, i.e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
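// For example, for "cond ? thenExpr : elseExpr", the COLON node stores "thenExpr" in gtOp2
// (ThenNode) and "elseExpr" in gtOp1 (ElseNode), as the constructor below shows.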
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
                                                     // A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For example,
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
    GTF_CALL_M_FAT_POINTER_CHECK       = 0x00010000, // CoreRT managed calli needs a transformation that checks
                                                     // a special bit in the calli address. If it is set, then it is necessary
                                                     // to restore the real function address and load the hidden argument
                                                     // as the first argument for calli. It is the CoreRT replacement for instantiating
                                                     // stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
    GTF_CALL_M_EXP_RUNTIME_LOOKUP      = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
    GTF_CALL_M_LATE_DEVIRT             = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
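// A minimal sketch (not from the JIT sources) of how the overloaded operators above are
// typically used; the particular flag choices below are arbitrary:
//
//   GenTreeCallFlags flags = GTF_CALL_M_EMPTY;
//   flags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_PINVOKE;
//   if ((flags & GTF_CALL_M_TAILCALL) != 0)
//   {
//       flags &= ~GTF_CALL_M_TAILCALL;
//   }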
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
        // Any remaining elements in m_regType[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
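// A minimal usage sketch (not from the JIT sources), assuming "retTypeDesc" is an
// initialized ReturnTypeDesc for a multi-reg returning call:
//
//   unsigned regCount = retTypeDesc->GetReturnRegCount();
//   for (unsigned i = 0; i < regCount; ++i)
//   {
//       var_types regType = retTypeDesc->GetReturnRegType(i);
//       regNumber reg     = retTypeDesc->GetABIReturnReg(i);
//       // ... use the register/type pair for the i'th part of the return value
//   }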
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
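// A minimal sketch (not from the JIT sources) of how a call site might be recorded,
// assuming "tailCallInfo", "sig" and "resolvedToken" already exist:
//
//   tailCallInfo->SetCallvirt(&sig, &resolvedToken);
//   assert(tailCallInfo->IsCallvirt() && !tailCallInfo->IsCalli());
//   CORINFO_SIG_INFO* calleeSig = tailCallInfo->GetSig();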
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
    // If changing this enum, also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
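    // A minimal iteration sketch (not from the JIT sources); "call" is assumed to be a
    // GenTreeCall* whose argument lists have already been set up:
    //
    //   for (GenTreeCall::Use& use : call->Args())
    //   {
    //       GenTree* argNode = use.GetNode();
    //       // ... late args are walked the same way via call->LateArgs()
    //   }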
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
        return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
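    // A minimal sketch (not from the JIT sources): for a call returning a value in two
    // registers, the registers are typically read back as
    //
    //   regNumber reg0 = call->GetRegNumByIdx(0); // same as call->GetRegNum()
    //   regNumber reg1 = call->GetRegNumByIdx(1); // stored in gtOtherRegs[0] when FEATURE_MULTIREG_RET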
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument and its calling convention
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
    //     The call may actually have a retBuf argument,
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction of whether tail prefixed or an implicit tail call
// is maintained on a call node till fgMorphCall() after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
    // This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that importer has performed tail call checks
// and providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
    // That is, it can be tail called though not explicitly prefixed with the "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
        // On XARCH we disassemble it from the callsite, except for tailcalls that need the indirection cell.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
    // expression evaluated after args are placed, which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
gtRetBufArg = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
GenTree* GetLclRetBufArgNode() const
{
if (gtRetBufArg == nullptr)
{
return nullptr;
}
assert(HasRetBufArg());
GenTree* lclRetBufArgNode = gtRetBufArg->GetNode();
switch (lclRetBufArgNode->OperGet())
{
// Get the true value from setup args
case GT_ASG:
return lclRetBufArgNode->AsOp()->gtGetOp2();
case GT_STORE_LCL_VAR:
return lclRetBufArgNode->AsUnOp()->gtGetOp1();
// Get the value from putarg wrapper nodes
case GT_PUTARG_REG:
case GT_PUTARG_STK:
return lclRetBufArgNode->AsOp()->gtGetOp1();
// Otherwise the node should be in the Use*
default:
return lclRetBufArgNode;
}
}
void SetLclRetBufArg(Use* retBufArg);
    Use* gtRetBufArg; // The argument that holds the return buffer argument
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
    // return registers is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
// in which case we need to separate them into int for each index.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOps
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
    // Note that this constructor takes the ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
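    // A minimal sketch (not from the JIT sources); "multiOp" is assumed to be a
    // GenTreeMultiOp* (e.g. a GT_HWINTRINSIC node). Note that Op() is 1-based:
    //
    //   GenTree* first = multiOp->Op(1);
    //   for (GenTree* operand : multiOp->Operands())
    //   {
    //       // ... visited in operand order, ignoring GTF_REVERSE_OPS
    //   }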
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
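// A minimal sketch (not from the JIT sources) of how a builder is filled in and handed
// off; "alloc" is assumed to be a CompAllocator and op1..op3 existing GenTree* operands:
//
//   IntrinsicNodeBuilder builder(alloc, 3);
//   builder.AddOperand(0, op1);
//   builder.AddOperand(1, op2);
//   builder.AddOperand(2, op3);
//   // A GenTreeHWIntrinsic/GenTreeSIMD constructor then takes ownership via std::move(builder).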
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
    unsigned char  gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
SetHWIntrinsicId(hwIntrinsicID);
if (OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
SetHWIntrinsicId(hwIntrinsicID);
if ((sizeof...(Operands) > 0) && OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
    // new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
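    // A minimal sketch (not from the JIT sources); the intrinsic ids below are placeholders.
    // "ChangeHWIntrinsicId" keeps the operand count, while "ResetHWIntrinsicId" reconfigures the node:
    //
    //   node->ChangeHWIntrinsicId(sameArityId);                       // operands unchanged
    //   node->ResetHWIntrinsicId(unaryId, op1);                       // now a one-operand node
    //   node->ResetHWIntrinsicId(ternaryId, compiler, op1, op2, op3); // may need the allocator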
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value
// - an index value,
// - the label to jump to if the index is out of range, and
// - the "kind" of the throw block to branch to on failure.
// It generates no result.
//
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length), gtIndRngFailBB(nullptr), gtThrowKind(kind)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
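// A minimal sketch (not from the JIT sources) of a typical bounds check, assuming
// "indexNode" and "lengthNode" are existing trees and SCK_RNGCHK_FAIL is the throw kind:
//
//   GenTreeBoundsChk* check =
//       new (compiler, GT_BOUNDS_CHECK) GenTreeBoundsChk(indexNode, lengthNode, SCK_RNGCHK_FAIL);
//   GenTree* arrayRef = check->GetArray(); // non-null only when lengthNode is GT_ARR_LENGTH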
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
                                 // on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
                       // and can be any expression (though it is likely to be either a GenTreeArrIndex
                       // node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
    //    2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
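    //
    // As an illustrative sketch (register names chosen arbitrarily): an x86-64 address such as
    // [rbx + rsi*4 + 16] would be modeled with Base = <tree computing rbx>, Index = <tree computing rsi>,
    // Scale = 4 and Offset = 16, while a plain [rbx] dereference uses only Base, with Scale == 0
    // and Offset == 0.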
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
    // These methods provide an interface to the indirection node's address expression,
    // exposing its base, index, scale and offset components (see GenTreeAddrMode).
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
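// For example (illustrative usage only), a debug dump might print:
//     printf("Removing " FMT_STMT "\n", stmt->GetID());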
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
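    // For example (sketch): for a block containing statements S1, S2, S3 in order,
    // S1->m_prev == S3, S1->m_next == S2, S2->m_next == S3 and S3->m_next == nullptr.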
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
    enum class Kind : __int8
    {
        Invalid,
        RepInstr,
        PartialRepInstr,
        Unroll,
        Push,
        PushAllSlots,
    };
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
    //     idx   -   index of the register
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
    //     idx   -   index of the register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
    // ClearOtherRegFlags: clear the GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// These are needed on any target that has multi-reg ops of any kind. Currently that set of
// targets happens to coincide with those that have multi-reg returns, which is why the code
// below is guarded by FEATURE_MULTIREG_RET (there isn't a separate FEATURE_MULTIREG_OPS).
//
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
        assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
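    // For example, ULT is encoded as (Unsigned | SLT) == 10 and FLTU as (Unordered | Float | SLT) == 26:
    // the low OperMask bits always carry the base relation, while the upper bits add the
    // unsigned/float/unordered qualifiers.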
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
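    // For example (illustrative): FromIntegralRelop(GT_LT, /* isUnsigned */ true) yields ULT,
    // while FromIntegralRelop(GT_EQ, false) yields EQ, because EQ/NE are always treated as unsigned.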
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
            FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
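    // For example (illustrative): Reverse(UGT) yields ULE, and Reverse(FLT) yields FGEU, since the
    // negation of an ordered float compare must also hold when the operands are unordered.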
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
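    // For example (illustrative): Swap(UGT) yields ULT and Swap(FLE) yields FGE, i.e. the condition
    // that holds when the two compare operands are exchanged.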
};
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
//     Returns true iff the tree is a GT_CNS_DBL with a value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
//     Like gtIconVal, the argument is of type ssize_t, so it cannot check for
// long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
//     True if this represents a SIMD vector with all elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
        // This implementation is kept explicit, rather than being written as
        // !IsCnsNonZeroFltOrDbl, because that negation would also be true for
        // nodes that are not floating-point constants at all.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given an GenTree node that represents an argument
// enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
//    false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
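// For example (illustrative), given COMMA(asg1, COMMA(asg2, LCL_VAR V01)), gtEffectiveVal() returns
// the LCL_VAR V01 node; when commaOnly is false it will also look through a unary NOP wrapper such
// as NOP(LCL_VAR V01). The tree shapes and local number here are hypothetical.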
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned by a comma-wrapped assignment
//
// Returns:
// tree representing value being assigned if this tree represents a
// comma-wrapped local definition and use.
//
//     the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is present.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
assert(!"Invalid regIndex for GetRegFromMultiRegNode");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
        return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
//-------------------------------------------------------------------------
// IsIntegralConstUnsignedPow2: Determines whether the unsigned value of
//                              an integral constant is a power of 2.
//
// Return Value:
// Returns true if the unsigned value of a GenTree's integral constant
//     is a power of 2.
//
// Notes:
//     Integral constant nodes store their value in signed form.
// This should handle cases where an unsigned-int was logically used in
// user code.
//
inline bool GenTree::IsIntegralConstUnsignedPow2() const
{
if (IsIntegralConst())
{
return isPow2((UINT64)AsIntConCommon()->IntegralValue());
}
return false;
}
//-------------------------------------------------------------------------
// IsIntegralConstAbsPow2: Determines whether the absolute value of
//                         an integral constant is a power of 2.
//
// Return Value:
// Returns true if the absolute value of a GenTree's integral constant
//     is a power of 2.
//
inline bool GenTree::IsIntegralConstAbsPow2() const
{
if (IsIntegralConst())
{
INT64 svalue = AsIntConCommon()->IntegralValue();
size_t value = (svalue == SSIZE_T_MIN) ? static_cast<size_t>(svalue) : static_cast<size_t>(abs(svalue));
return isPow2(value);
}
return false;
}
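// For example (illustrative): a constant of -8 yields abs(-8) == 8, which is a power of 2, so the
// method returns true; SSIZE_T_MIN is special-cased above since abs() cannot represent its
// magnitude, and its unsigned bit pattern is itself a power of 2.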
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in retail (RET) builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT, as it is used to represent all the possible sizes.
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to the unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
* The values are such that they don't overlap with helper's or user function's handle.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
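// For example, GetAssertionIndex(0) yields AssertionIndex 1, i.e. the first assertion.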
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
class FieldSeqNode
{
public:
enum class FieldKind : uintptr_t
{
Instance = 0, // An instance field, object or struct.
SimpleStatic = 1, // Simple static field - the handle represents a unique location.
SharedStatic = 2, // Static field on a shared generic type: "Class<__Canon>.StaticField".
};
private:
static const uintptr_t FIELD_KIND_MASK = 0b11;
static_assert_no_msg(sizeof(CORINFO_FIELD_HANDLE) == sizeof(uintptr_t));
uintptr_t m_fieldHandleAndKind;
FieldSeqNode* m_next;
public:
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind);
FieldKind GetKind() const
{
return static_cast<FieldKind>(m_fieldHandleAndKind & FIELD_KIND_MASK);
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(!IsPseudoField() && (GetFieldHandleValue() != NO_FIELD_HANDLE));
return GetFieldHandleValue();
}
CORINFO_FIELD_HANDLE GetFieldHandleValue() const
{
return CORINFO_FIELD_HANDLE(m_fieldHandleAndKind & ~FIELD_KIND_MASK);
}
// returns true when this is the pseudo #FirstElem field sequence
bool IsFirstElemFieldSeq() const;
// returns true when this is the pseudo #ConstantIndex field sequence
bool IsConstantIndexFieldSeq() const;
// returns true when this is the pseudo #FirstElem field sequence or the pseudo #ConstantIndex field sequence
bool IsPseudoField() const;
bool IsStaticField() const
{
return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic);
}
bool IsSharedStaticField() const
{
return GetKind() == FieldKind::SharedStatic;
}
FieldSeqNode* GetNext() const
{
return m_next;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable.
// Note that there is a one-to-one relationship between the field handle and the field kind, so
// we do not need to mask away the latter for comparison purposes.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(fsn.m_fieldHandleAndKind) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHandleAndKind == fsn2.m_fieldHandleAndKind && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
// Dummy variables to provide the addresses for the "pseudo field handle" statics below.
static int FirstElemPseudoFieldStruct;
static int ConstantIndexPseudoFieldStruct;
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd,
FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
// We have a few "pseudo" field handles:
// This treats the constant offset of the first element of something as if it were a field.
// Works for method table offsets of boxed structs, or first elem offset of arrays/strings.
static CORINFO_FIELD_HANDLE FirstElemPseudoField;
// If there is a constant index, we make a pseudo field to correspond to the constant added to the
// offset of the indexed field. This keeps the field sequence structure "normalized", especially in the
// case where the element type is a struct, so we might add a further struct field offset.
static CORINFO_FIELD_HANDLE ConstantIndexPseudoField;
static bool IsPseudoField(CORINFO_FIELD_HANDLE hnd)
{
return hnd == FirstElemPseudoField || hnd == ConstantIndexPseudoField;
}
};
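// Illustrative usage sketch (the allocator "alloc" and field handles "hndA"/"hndB" below are
// hypothetical, shown only to demonstrate the canonicalization contract):
//
//   FieldSeqStore store(alloc);
//   FieldSeqNode* a = store.CreateSingleton(hndA); // canonical singleton for field A
//   FieldSeqNode* ab = store.Append(a, store.CreateSingleton(hndB)); // canonical sequence A.B
//   FieldSeqNode* bad = store.Append(ab, FieldSeqStore::NotAField()); // "NotAField" is infectious: bad == NotAField()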
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
GTF_VAR_CAST = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be the type of the local)
GTF_VAR_ITERATOR = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
GTF_VAR_ARR_INDEX = 0x00000020, // The variable is part of (the index portion of) an array index expression.
// Shares a value with GTF_REVERSE_OPS, which is meaningless for local var.
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
GTF_IND_VOLATILE = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_ARR_INDEX = 0x00800000, // GT_IND -- the indirection represents an (SZ) array index
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF | \
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_ARR_INDEX | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeqNode* (used only as VNHandle)
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proved this check is always in-bounds
GTF_ARRLEN_ARR_IDX = 0x80000000, // GT_ARR_LENGTH -- Length which feeds into an array index expression
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables cannot.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
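// For example (illustrative): GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(GTF_ASG) is false, since an assignment
// without GTF_GLOB_REF is assumed to write only local state; GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(GTF_ASG | GTF_GLOB_REF)
// is true, as is any flag set containing GTF_CALL or GTF_EXCEPT.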
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper)
{
return (firstOper + 1) == secondOper;
}
template <typename... Opers>
constexpr bool OpersAreContiguous(genTreeOps firstOper, genTreeOps secondOper, Opers... otherOpers)
{
return OpersAreContiguous(firstOper, secondOper) && OpersAreContiguous(secondOper, otherOpers...);
}
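// Illustrative use: the static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR))
// in OperIsConst below relies on this helper to verify that each listed oper is exactly one greater than its
// predecessor in the enum, which is what makes the range check "(GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR)" valid.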
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GT_STRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
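// For example, SetCosts(300, 5) records (MAX_COST, 5), i.e. (255, 5): each component saturates at MAX_COST.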
// Optimized copy function, to avoid the SetCosts() function comparisons, and to make it clearer that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
regMaskTP gtGetContainedRegMask();
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
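// Illustrative usage: "node->OperIs(GT_ADD, GT_SUB, GT_MUL)" expands to a chain of equality checks
// against gtOper; "node->TypeIs(TYP_INT, TYP_LONG)" does the same against gtType.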
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(
OpersAreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(OpersAreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Return if binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Return if binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
bool OperIsSsaDef() const
{
return OperIs(GT_ASG, GT_CALL);
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
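// Sketch of the regIndex-to-flag mapping (assuming GetLastUseBit follows the MULTIREG_LAST_USE_SHIFT
// scheme documented with the GTF_VAR_MULTIREG_DEATH* flags above): regIndex 0 maps to
// GTF_VAR_MULTIREG_DEATH0 (1 << 26), regIndex 1 to GTF_VAR_MULTIREG_DEATH1 (1 << 27), and so on.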
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of an array (the child of a GT_IND labeled with GTF_IND_ARR_INDEX).
// Sets "pArr" to the node representing the array (either an array object pointer, or perhaps a byref to the some
// element).
// Sets "*pArrayType" to the class handle for the array type.
// Sets "*inxVN" to the value number inferred for the array index.
// Sets "*pFldSeq" to the sequence, if any, of struct fields used to index into the array element.
void ParseArrayAddress(
Compiler* comp, struct ArrayInfo* arrayInfo, GenTree** pArr, ValueNum* pInxVN, FieldSeqNode** pFldSeq);
// Helper method for the above.
void ParseArrayAddressWork(Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset,
FieldSeqNode** pFldSeq);
// Requires "this" to be a GT_IND. Requires the outermost caller to set "*pFldSeq" to nullptr.
// Returns true if it is an array index expression, or access to a (sequence of) struct field(s)
// within a struct array element. If it returns true, sets *arrayInfo to the array information, and sets *pFldSeq
// to the sequence of struct field accesses.
bool ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be the address of a (possible) array element (or struct field within that).
// If it is, sets "*arrayInfo" to the array access info, "*pFldSeq" to the sequence of struct fields
// accessed within the array element, and returns true. If not, returns "false".
bool ParseArrayElemAddrForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq);
// Requires "this" to be an int expression. If it is a sequence of one or more integer constants added together,
// returns true and sets "*pFldSeq" to the sequence of fields with which those constants are annotated.
bool ParseOffsetForm(Compiler* comp, FieldSeqNode** pFldSeq);
// Labels "*this" as an array index expression: label all constants and variables that could contribute, as part of
// an affine expression, to the value of the index.
void LabelIndex(Compiler* comp, bool isConst = true);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntegralConstUnsignedPow2() const;
inline bool IsIntegralConstAbsPow2() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The visitor must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
// node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
// This function is generally more efficient than the operand iterator and should be preferred over that API for
// hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
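// As an illustrative sketch of the LIR form (the local number is made up for
// exposition, this is not copied from an actual dump):
//
//    STORE_LCL_VAR(lcl7, PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//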
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
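// As an illustrative sketch (the local number, field types and offsets are made
// up for exposition): a struct argument { float x; float y; } split for
// register passing could be represented as a FIELD_LIST with two uses:
//
//    FIELD_LIST
//      use: LCL_FLD float V03 [+0]
//      use: LCL_FLD float V03 [+4]
//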
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
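// A minimal usage sketch (assuming "node" is any valid GenTree*); each use edge
// is a GenTree** that can be overwritten to replace the corresponding operand:
//
//    for (GenTree** useEdge : node->UseEdges())
//    {
//        GenTree* operand = *useEdge;
//        ...
//    }
//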
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of types are unary operators with optional arguments. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization this node
// then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas reg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For the 64-bit targets we will only use GT_CNS_INT, since it can represent all the possible sizes.
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
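// For illustration (values chosen arbitrarily): on a 64-bit target both the int
// constant 42 and the long constant 0x100000000 become GT_CNS_INT (of TYP_INT
// and TYP_LONG respectively), while on a 32-bit target the latter is
// represented as GT_CNS_LNG.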
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address, holds the method handle to that target which is used
// to fetch target method name and display in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
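// A minimal usage sketch (hypothetical constant-folding code; "tree", "op1" and
// "op2" are assumed to be GT_CNS_INT nodes and the sum is computed in ssize_t
// arithmetic on the host):
//
//    ssize_t folded = op1->AsIntConCommon()->IconValue() + op2->AsIntConCommon()->IconValue();
//    tree->AsIntConCommon()->SetValueTruncating(folded); // truncated to int32 unless "tree" is TYP_LONG
//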
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
FieldSeqNode* GetFieldSeq() const;
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
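// As an illustrative example of the packing: a flags value of 0x9 (binary 1001)
// records GTF_SPILL for register 0 (bits 0-1 == 01) and GTF_SPILLED for
// register 1 (bits 2-3 == 10), while registers 2 and 3 have no spill state.
//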
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear the per-register GTF_* spill flags (gtSpillFlags)
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy the per-register GTF_* spill flags (gtSpillFlags) from
// the given LclVar node.
//
// Arguments:
// from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
// GenTreeCast - conversion to a different type (GT_CAST).
//
// This node represents all "conv[.ovf].{type}[.un]" IL opcodes.
//
// There are four semantically significant values that determine what it does:
//
// 1) "genActualType(CastOp())" - the type being cast from.
// 2) "gtCastType" - the type being cast to.
// 3) "IsUnsigned" (the "GTF_UNSIGNED" flag) - whether the cast is "unsigned".
// 4) "gtOverflow" (the "GTF_OVERFLOW" flag) - whether the cast is checked.
//
// Different "kinds" of casts use these values differently; not all are always
// meaningful or legal:
//
// 1) For casts from FP types, "IsUnsigned" will always be "false".
// 2) Checked casts use "IsUnsigned" to represent the fact the type being cast
// from is unsigned. The target type's signedness is similarly significant.
// 3) For unchecked casts, "IsUnsigned" is significant for "int -> long", where
// it decides whether the cast sign- or zero-extends its source, and "integer
// -> FP" cases. For all other unchecked casts, "IsUnsigned" is meaningless.
// 4) For unchecked casts, signedness of the target type is only meaningful if
// the cast is to an FP or small type. In the latter case (and everywhere
// else in IR) it decides whether the value will be sign- or zero-extended.
//
// For additional context on "GT_CAST"'s semantics, see "IntegralRange::ForCast"
// methods and "GenIntCastDesc"'s constructor.
//
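// A few illustrative cases (informal notation, not JIT dump syntax):
//
//    int -> long, unchecked, IsUnsigned() == false : sign-extends the source
//    int -> long, unchecked, IsUnsigned() == true  : zero-extends the source
//    long -> int, unchecked                        : truncates the source to 32 bits
//    long -> int, checked (gtOverflow())           : throws if the value does not fit
//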
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
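// For illustration (an informal sketch of "cond ? x : y"): the QMARK's second
// operand is a COLON whose ThenNode() (gtOp2) is x and whose ElseNode() (gtOp1)
// is y, i.e. GT_QMARK(cond, GT_COLON(y, x)).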
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
// A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For example,
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs transformation, that checks
// special bit in calli address. If it is set, then it is necessary
// to restore real function address and load hidden argument
// as the first argument for calli. It is CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
GTF_CALL_M_EXP_RUNTIME_LOOKUP = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
GTF_CALL_M_LATE_DEVIRT = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
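// As an illustrative example (x64 SysV ABI; the struct layout is made up for
// exposition): a call returning struct { long a; double b; } would be described
// with m_regType[0] = TYP_LONG (returned in RAX) and m_regType[1] = TYP_DOUBLE
// (returned in XMM0), so GetReturnRegCount() == 2.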
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
// Any remaining elements in m_regType[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
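// Illustrative sketch: the Use/UseList machinery above enables range-based iteration over a
// call's arguments, e.g. (assuming 'call' is a GenTreeCall*):
//
//   for (GenTreeCall::Use& use : call->Args())
//   {
//       GenTree* argNode = use.GetNode();
//       // ... inspect or rewrite 'argNode' ...
//   }
//
// LateArgs() can be iterated the same way for gtCallLateArgs.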
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument, as determined by its calling convention
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
//    Returns true if we treat the call as if it has a retBuf argument.
//    The call may actually have a retBuf argument,
//    or it could be a JIT helper that we are still transforming during
//    the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
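// Illustrative sketch: callers typically guard multi-reg handling on HasMultiRegRetVal() and
// then consult the ReturnTypeDesc, e.g. (assuming 'call' is a GenTreeCall*):
//
//   if (call->HasMultiRegRetVal())
//   {
//       const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
//       for (unsigned i = 0; i < retTypeDesc->GetReturnRegCount(); ++i)
//       {
//           regNumber reg = call->GetRegNumByIdx(i);
//           // ... record 'reg' as holding the i'th piece of the return value ...
//       }
//   }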
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction between a tail-prefixed and an implicit tail call
// is maintained on a call node until fgMorphCall(), after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
// This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that the importer has performed tail call checks
// and is providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, can be tail called though not explicitly prefixed with "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH we recover it by disassembling the call site, except for tailcalls, which need the indirection cell argument.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
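// Illustrative sketch: code expanding control-flow-guard checks might branch on the computed
// kind roughly as follows (the expansion details are elided):
//
//   switch (call->GetCFGCallKind())
//   {
//       case CFGCallKind::ValidateAndCall:
//           // validate the call target, then emit the original indirect call
//           break;
//       case CFGCallKind::Dispatch:
//           // route the call through the dispatch helper instead
//           break;
//   }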
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed, which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
gtRetBufArg = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
GenTree* GetLclRetBufArgNode() const
{
if (gtRetBufArg == nullptr)
{
return nullptr;
}
assert(HasRetBufArg());
GenTree* lclRetBufArgNode = gtRetBufArg->GetNode();
switch (lclRetBufArgNode->OperGet())
{
// Get the true value from setup args
case GT_ASG:
return lclRetBufArgNode->AsOp()->gtGetOp2();
case GT_STORE_LCL_VAR:
return lclRetBufArgNode->AsUnOp()->gtGetOp1();
// Get the value from putarg wrapper nodes
case GT_PUTARG_REG:
case GT_PUTARG_STK:
return lclRetBufArgNode->AsOp()->gtGetOp1();
// Otherwise the node should be in the Use*
default:
return lclRetBufArgNode;
}
}
void SetLclRetBufArg(Use* retBufArg);
Use* gtRetBufArg; // The argument that holds the return buffer argument
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return register is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
// in which case we need to separate them into int for each index.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes the ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
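// Illustrative sketch: operands of a GenTreeMultiOp-derived node can be visited either by
// their 1-based index or via the Operands() iterator above (which, unlike the general
// GenTree iterator, does not respect GTF_REVERSE_OPS); 'node' is assumed to be such a node.
//
//   for (size_t i = 1; i <= node->GetOperandCount(); i++)
//   {
//       GenTree* op = node->Op(i);
//       // ...
//   }
//
//   for (GenTree* op : node->Operands())
//   {
//       // ...
//   }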
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
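// Illustrative sketch: an IntrinsicNodeBuilder collects a dynamic number of operands and then
// transfers their ownership to a GenTreeJitIntrinsic-derived constructor; 'alloc', 'ops' and
// 'count' are assumed to be supplied by the caller, and node allocation itself is elided.
//
//   IntrinsicNodeBuilder nodeBuilder(alloc, count);
//   for (size_t i = 0; i < count; i++)
//   {
//       nodeBuilder.AddOperand(i, ops[i]);
//   }
//   // ... std::move(nodeBuilder) into, e.g., the GenTreeHWIntrinsic constructor below ...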
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
SetHWIntrinsicId(hwIntrinsicID);
if (OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
SetHWIntrinsicId(hwIntrinsicID);
if ((sizeof...(Operands) > 0) && OperIsMemoryStore())
{
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
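// Illustrative sketch: when a transformation rewrites an intrinsic in place, the
// arity-preserving and the reconfiguring forms are used roughly as follows (the intrinsic
// ids are placeholders, 'comp' is the Compiler*, and updating side effect flags remains the
// caller's job):
//
//   node->ChangeHWIntrinsicId(sameArityIntrinsicId);
//   node->ResetHWIntrinsicId(unaryIntrinsicId, op1);
//   node->ResetHWIntrinsicId(ternaryIntrinsicId, comp, op1, op2, op3);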
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value,
// - an index value,
// - the label to jump to if the index is out of range, and
// - the "kind" of the throw block to branch to on failure.
// It generates no result.
//
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length), gtIndRngFailBB(nullptr), gtThrowKind(kind)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex,
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
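// Illustrative sketch: an address such as [rbx + rsi*8 + 0x20] would be represented with
// Base() being the tree computing rbx, Index() the tree computing rsi, GetScale() == 8 and
// Offset() == 0x20, while a plain [rbx + 0x20] address leaves Index() null and the scale
// zero, per the legal patterns listed above.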
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the components of the indirection's address
// (base, index, scale, and offset).
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
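// Usage sketch (assuming "firstStmt" is the first statement of some block; illustrative only):
//
//   Statement* lastStmt = firstStmt->GetPrevStmt(); // the first statement's prev link points at the last one
//   assert(lastStmt->GetNextStmt() == nullptr);     // but the list is not circular in the "next" direction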
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8{
Invalid, RepInstr, PartialRepInstr, Unroll, Push, PushAllSlots,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
    //     idx   -   index of the register within this struct argument
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
    //     idx   -   index of the register within this struct argument
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// As it turns out, these are only needed on targets that happen to have multi-reg returns.
// However, they are actually needed on any target that has any multi-reg ops. It is just
// coincidence that those are the same (and there isn't a FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
        assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
            FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
};
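// Worked example of the encoding above (a sketch; the concrete Code values are an implementation
// detail and may change):
//
//   GenCondition c = GenCondition::FromIntegralRelop(GT_LT, /* isUnsigned */ true); // ULT
//   GenCondition r = GenCondition::Reverse(c);                                      // UGE: !(a < b) is (a >= b)
//   GenCondition s = GenCondition::Swap(c);                                         // UGT: (a < b) is (b > a)
//   assert(c.IsUnsigned() && !c.IsFloat());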
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
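// Note (informal): these nodes are created in the backend (lowering), after a compare has been split
// into a flag-setting operation and a flag consumer. For example, GT_SETCC(GenCondition::EQ)
// materializes the equality result as 0/1 in a register, while GT_JCC(GenCondition::SLT) branches
// on "signed less than" using the flags produced by the preceding node.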
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
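// Illustrative examples of the classification above (informal notation):
//
//   ASG(BLK<16>(dstAddr), INIT_VAL(CNS_INT 0xab)) -> OperIsInitBlkOp() is true
//   ASG(LCL_VAR<struct> V02, LCL_VAR<struct> V03) -> OperIsCopyBlkOp() is true
//
// That is, a block op whose source is an init value or a constant is an "init" block op,
// and any other block op is a "copy" block op.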
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
// Returns true iff the tree is a GT_CNS_DBL with a value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
// Like gtIconVal, the argument is of ssize_t, so cannot check for
// long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
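// For illustration (xarch shapes shown; arm64 uses the Vector64/Vector128 equivalents):
//
//   GT_HWINTRINSIC NI_Vector128_get_Zero          -> IsIntegralConstVector(0) is true
//   GT_HWINTRINSIC NI_Vector128_Create(CNS_INT 5) -> IsIntegralConstVector(5) is true
//
// provided, in both cases, that the SIMD base type is integral.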
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
//     True if this represents a SIMD vector with all of its elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
        // This implementation is almost identical to IsCnsNonZeroFltOrDbl,
        // but it is easier to read as a standalone check
        // than as the negation of IsCnsNonZeroFltOrDbl.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given a GenTree node that represents an argument,
// enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
//    false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned to a comma-wrapped assignment
//
// Returns:
// tree representing value being assigned if this tree represents a
// comma-wrapped local definition and use.
//
//    the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
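// For example (informal notation), given
//
//   COMMA(ASG(LCL_VAR V01, <value>), LCL_VAR V01)
//
// gtCommaAssignVal() returns <value>; for any other shape it returns the original tree.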
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is presented.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
    assert(!"Invalid regIndex for GetRegByIndex");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
        return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
//-------------------------------------------------------------------------
// IsIntegralConstUnsignedPow2: Determines whether the unsigned value of
// an integral constant is the power of 2.
//
// Return Value:
// Returns true if the unsigned value of a GenTree's integral constant
// is the power of 2.
//
// Notes:
// Integral constant nodes store its value in signed form.
// This should handle cases where an unsigned-int was logically used in
// user code.
//
inline bool GenTree::IsIntegralConstUnsignedPow2() const
{
if (IsIntegralConst())
{
return isPow2((UINT64)AsIntConCommon()->IntegralValue());
}
return false;
}
//-------------------------------------------------------------------------
// IsIntegralConstAbsPow2: Determines whether the absolute value of
// an integral constant is the power of 2.
//
// Return Value:
// Returns true if the absolute value of a GenTree's integral constant
// is the power of 2.
//
inline bool GenTree::IsIntegralConstAbsPow2() const
{
if (IsIntegralConst())
{
INT64 svalue = AsIntConCommon()->IntegralValue();
size_t value = (svalue == SSIZE_T_MIN) ? static_cast<size_t>(svalue) : static_cast<size_t>(abs(svalue));
return isPow2(value);
}
return false;
}
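// For example, CNS_INT(-8) satisfies IsIntegralConstAbsPow2() since |-8| == 8, while CNS_INT(6) does not.
// The SSIZE_T_MIN special case above avoids calling abs() on the minimum value (which would overflow);
// reinterpreted as unsigned it is a single set bit and therefore already a power of two.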
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
| 1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/jit/morph.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Morph XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "allocacheck.h" // for alloca
// Convert the given node into a call to the specified helper passing
// the given argument list.
//
// Tries to fold constants and also adds an edge for the overflow exception.
// Returns the morphed tree.
GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper)
{
GenTree* result;
/* If the operand is a constant, we'll try to fold it */
if (oper->OperIsConst())
{
GenTree* oldTree = tree;
tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
if (tree != oldTree)
{
return fgMorphTree(tree);
}
else if (tree->OperIsConst())
{
return fgMorphConst(tree);
}
// assert that oper is unchanged and that it is still a GT_CAST node
noway_assert(tree->AsCast()->CastOp() == oper);
noway_assert(tree->gtOper == GT_CAST);
}
result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper));
assert(result == tree);
return result;
}
/*****************************************************************************
*
* Convert the given node into a call to the specified helper passing
* the given argument list.
*/
GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs)
{
// The helper call ought to be semantically equivalent to the original node, so preserve its VN.
tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN);
GenTreeCall* call = tree->AsCall();
call->gtCallType = CT_HELPER;
call->gtReturnType = tree->TypeGet();
call->gtCallMethHnd = eeFindHelper(helper);
call->gtCallThisArg = nullptr;
call->gtCallArgs = args;
call->gtCallLateArgs = nullptr;
call->fgArgInfo = nullptr;
call->gtRetClsHnd = nullptr;
call->gtCallMoreFlags = GTF_CALL_M_EMPTY;
call->gtInlineCandidateInfo = nullptr;
call->gtControlExpr = nullptr;
call->gtRetBufArg = nullptr;
#ifdef UNIX_X86_ABI
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
#if DEBUG
// Helper calls are never candidates.
call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER;
call->callSig = nullptr;
#endif // DEBUG
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if FEATURE_MULTIREG_RET
call->ResetReturnType();
call->ClearOtherRegs();
call->ClearOtherRegFlags();
#ifndef TARGET_64BIT
if (varTypeIsLong(tree))
{
call->InitializeLongReturnType();
}
#endif // !TARGET_64BIT
#endif // FEATURE_MULTIREG_RET
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree->gtFlags |= GTF_CALL;
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
/* Perform the morphing */
if (morphArgs)
{
tree = fgMorphArgs(call);
}
return tree;
}
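// For example (a sketch, using one of the casts handled further below): on targets with no direct
// instruction for it, an overflow-checked cast from double to ulong is rewritten in place by
// fgMorphCastIntoHelper into a helper call of roughly the shape
//
//   CALL help CORINFO_HELP_DBL2ULNG_OVF(<double operand>)
//
// with the original cast node mutated into the call node (preserving its value number).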
//------------------------------------------------------------------------
// fgMorphExpandCast: Performs the pre-order (required) morphing for a cast.
//
// Performs a rich variety of pre-order transformations (and some optimizations).
//
// Notably:
// 1. Splits long -> small type casts into long -> int -> small type
// for 32 bit targets. Does the same for float/double -> small type
// casts for all targets.
// 2. Morphs casts not supported by the target directly into helpers.
// These mostly have to do with casts from and to floating point
// types, especially checked ones. Refer to the implementation for
// what specific casts need to be handled - it is a complex matrix.
// 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via
// assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC
// temporary.
//    4. "Pushes down" truncating long -> int casts for some operations:
// CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)).
// The purpose of this is to allow "optNarrowTree" in the post-order
// traversal to fold the tree into a TYP_INT one, which helps 32 bit
// targets (and AMD64 too since 32 bit instructions are more compact).
// TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64.
//
// Arguments:
// tree - the cast tree to morph
//
// Return Value:
// The fully morphed tree, or "nullptr" if it needs further morphing,
// in which case the cast may be transformed into an unchecked one
// and its operand changed (the cast "expanded" into two).
//
GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree)
{
GenTree* oper = tree->CastOp();
if (fgGlobalMorph && (oper->gtOper == GT_ADDR))
{
// Make sure we've checked if 'oper' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast
// morphing code to see that type.
fgMorphImplicitByRefArgs(oper);
}
var_types srcType = genActualType(oper);
var_types dstType = tree->CastToType();
unsigned dstSize = genTypeSize(dstType);
// See if the cast has to be done in two steps. R -> I
if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType))
{
if (srcType == TYP_FLOAT
#if defined(TARGET_ARM64)
// Arm64: src = float, dst is overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& tree->gtOverflow()
#elif defined(TARGET_AMD64)
// Amd64: src = float, dst = uint64 or overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& (tree->gtOverflow() || (dstType == TYP_ULONG))
#elif defined(TARGET_ARM)
// Arm: src = float, dst = int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType))
#else
// x86: src = float, dst = uint32/int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT))
#endif
)
{
oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE);
}
// Do we need to do it in two steps R -> I -> smallType?
if (dstSize < genTypeSize(TYP_INT))
{
oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->AsCast()->CastOp() = oper;
// We must not mistreat the original cast, which was from a floating point type,
// as from an unsigned type, since we now have a TYP_INT node for the source and
// CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT).
assert(!tree->IsUnsigned());
}
else
{
if (!tree->gtOverflow())
{
#ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly.
return nullptr;
#else
switch (dstType)
{
case TYP_INT:
return nullptr;
case TYP_UINT:
#if defined(TARGET_ARM) || defined(TARGET_AMD64)
return nullptr;
#else // TARGET_X86
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper);
#endif // TARGET_X86
case TYP_LONG:
#ifdef TARGET_AMD64
// SSE2 has instructions to convert a float/double directly to a long
return nullptr;
#else // !TARGET_AMD64
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper);
#endif // !TARGET_AMD64
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper);
default:
unreached();
}
#endif // TARGET_ARM64
}
else
{
switch (dstType)
{
case TYP_INT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper);
case TYP_UINT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper);
case TYP_LONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper);
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper);
default:
unreached();
}
}
}
}
#ifndef TARGET_64BIT
// The code generation phase (for x86 & ARM32) does not handle casts
// directly from [u]long to anything other than [u]int. Insert an
// intermediate cast to native int.
else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType))
{
oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->AsCast()->CastOp() = oper;
}
#endif //! TARGET_64BIT
#ifdef TARGET_ARMARCH
// AArch, unlike x86/amd64, has instructions that can cast directly from
// all integers (except for longs on AArch32 of course) to floats.
// Because there is no IL instruction conv.r4.un, uint/ulong -> float
// casts are always imported as CAST(float <- CAST(double <- uint/ulong)).
// We can eliminate the redundant intermediate cast as an optimization.
else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST)
#ifdef TARGET_ARM
&& !varTypeIsLong(oper->AsCast()->CastOp())
#endif
)
{
oper->gtType = TYP_FLOAT;
oper->CastToType() = TYP_FLOAT;
return fgMorphTree(oper);
}
#endif // TARGET_ARMARCH
#ifdef TARGET_ARM
// Converts long/ulong --> float/double casts into helper calls.
else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType))
{
if (dstType == TYP_FLOAT)
{
// there is only a double helper, so we
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
if (tree->gtFlags & GTF_UNSIGNED)
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
#endif // TARGET_ARM
#ifdef TARGET_AMD64
// Do we have to do two step U4/8 -> R4/8 ?
// Codegen supports the following conversion as one-step operation
// a) Long -> R4/R8
// b) U8 -> R8
//
// The following conversions are performed as two-step operations using above.
// U4 -> R4/8 = U4-> Long -> R4/8
// U8 -> R4 = U8 -> R8 -> R4
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
if (dstType == TYP_FLOAT)
{
// Codegen can handle U8 -> R8 conversion.
// U8 -> R4 = U8 -> R8 -> R4
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->CastOp() = oper;
}
}
#endif // TARGET_AMD64
#ifdef TARGET_X86
// Do we have to do two step U4/8 -> R4/8 ?
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
}
else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType))
{
oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
// Since we don't have a Jit Helper that converts to a TYP_FLOAT
// we just use the one that converts to a TYP_DOUBLE
// and then add a cast to TYP_FLOAT
//
if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL))
{
// Fix the return type to be TYP_DOUBLE
//
oper->gtType = TYP_DOUBLE;
// Add a Cast to TYP_FLOAT
//
tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
else
{
return oper;
}
}
#endif // TARGET_X86
else if (varTypeIsGC(srcType) != varTypeIsGC(dstType))
{
// We are casting away GC information. We would like to just
// change the type to int, however this gives the emitter fits because
// it believes the variable is a GC variable at the beginning of the
// instruction group, but it is not turned non-GC by the code generator.
// We fix this by copying the GC pointer to a non-GC pointer temp.
noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?");
// We generate an assignment to an int and then do the cast from an int. With this we avoid
// the gc problem and we allow casts to bytes, longs, etc...
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC"));
oper->gtType = TYP_I_IMPL;
GenTree* asg = gtNewTempAssign(lclNum, oper);
oper->gtType = srcType;
// do the real cast
GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType);
// Generate the comma tree
oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast);
return fgMorphTree(oper);
}
// Look for narrowing casts ([u]long -> [u]int) and try to push them
// down into the operand before morphing it.
//
// It doesn't matter if this cast is from ulong or long (i.e. if
// GTF_UNSIGNED is set) because the transformation is only applied to
// overflow-insensitive narrowing casts, which always silently truncate.
//
// Note that casts from [u]long to small integer types are handled above.
if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT)))
{
// As a special case, look for overflow-sensitive casts of an AND
// expression, and see if the second operand is a small constant. Since
// the result of an AND is bound by its smaller operand, it may be
// possible to prove that the cast won't overflow, which will in turn
// allow the cast's operand to be transformed.
if (tree->gtOverflow() && (oper->OperGet() == GT_AND))
{
GenTree* andOp2 = oper->AsOp()->gtOp2;
// Look for a constant less than 2^{32} for a cast to uint, or less
// than 2^{31} for a cast to int.
int maxWidth = (dstType == TYP_UINT) ? 32 : 31;
if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0))
{
tree->ClearOverflow();
tree->SetAllEffectsFlags(oper);
}
}
// Only apply this transformation during global morph,
// when neither the cast node nor the oper node may throw an exception
// based on the upper 32 bits.
//
if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx())
{
// For these operations the lower 32 bits of the result only depends
// upon the lower 32 bits of the operands.
//
bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG);
// For long LSH cast to int, there is a discontinuity in behavior
// when the shift amount is 32 or larger.
//
// CAST(INT, LSH(1LL, 31)) == LSH(1, 31)
// LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31)
//
// CAST(INT, LSH(1LL, 32)) == 0
// LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1
//
// So some extra validation is needed.
//
if (oper->OperIs(GT_LSH))
{
GenTree* shiftAmount = oper->AsOp()->gtOp2;
// Expose constant value for shift, if possible, to maximize the number
// of cases we can handle.
shiftAmount = gtFoldExpr(shiftAmount);
oper->AsOp()->gtOp2 = shiftAmount;
#if DEBUG
// We may remorph the shift amount tree again later, so clear any morphed flag.
shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
if (shiftAmount->IsIntegralConst())
{
const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue();
if ((shiftAmountValue >= 64) || (shiftAmountValue < 0))
{
// Shift amount is large enough or negative so result is undefined.
// Don't try to optimize.
assert(!canPushCast);
}
else if (shiftAmountValue >= 32)
{
// We know that we have a narrowing cast ([u]long -> [u]int)
// and that we are casting to a 32-bit value, which will result in zero.
//
// Check to see if we have any side-effects that we must keep
//
if ((tree->gtFlags & GTF_ALL_EFFECT) == 0)
{
// Result of the shift is zero.
DEBUG_DESTROY_NODE(tree);
GenTree* zero = gtNewZeroConNode(TYP_INT);
return fgMorphTree(zero);
}
else // We do have a side-effect
{
// We could create a GT_COMMA node here to keep the side-effect and return a zero
// Instead we just don't try to optimize this case.
canPushCast = false;
}
}
else
{
// Shift amount is positive and small enough that we can push the cast through.
canPushCast = true;
}
}
else
{
// Shift amount is unknown. We can't optimize this case.
assert(!canPushCast);
}
}
if (canPushCast)
{
DEBUG_DESTROY_NODE(tree);
// Insert narrowing casts for op1 and op2.
oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType);
if (oper->AsOp()->gtOp2 != nullptr)
{
oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType);
}
// Clear the GT_MUL_64RSLT if it is set.
if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT))
{
oper->gtFlags &= ~GTF_MUL_64RSLT;
}
// The operation now produces a 32-bit result.
oper->gtType = TYP_INT;
// Remorph the new tree as the casts that we added may be folded away.
return fgMorphTree(oper);
}
}
}
return nullptr;
}
#ifdef DEBUG
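//------------------------------------------------------------------------
// getNonStandardArgKindName: Get a printable name for the given non-standard
// argument kind (DEBUG only).
//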
const char* getNonStandardArgKindName(NonStandardArgKind kind)
{
switch (kind)
{
case NonStandardArgKind::None:
return "None";
case NonStandardArgKind::PInvokeFrame:
return "PInvokeFrame";
case NonStandardArgKind::PInvokeTarget:
return "PInvokeTarget";
case NonStandardArgKind::PInvokeCookie:
return "PInvokeCookie";
case NonStandardArgKind::WrapperDelegateCell:
return "WrapperDelegateCell";
case NonStandardArgKind::ShiftLow:
return "ShiftLow";
case NonStandardArgKind::ShiftHigh:
return "ShiftHigh";
case NonStandardArgKind::FixedRetBuffer:
return "FixedRetBuffer";
case NonStandardArgKind::VirtualStubCell:
return "VirtualStubCell";
case NonStandardArgKind::R2RIndirectionCell:
return "R2RIndirectionCell";
case NonStandardArgKind::ValidateIndirectCallTarget:
return "ValidateIndirectCallTarget";
default:
unreached();
}
}
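//------------------------------------------------------------------------
// fgArgTabEntry::Dump: Print the contents of this argument table entry (DEBUG only).
//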
void fgArgTabEntry::Dump() const
{
printf("fgArgTabEntry[arg %u", argNum);
printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet()));
printf(" %s", varTypeName(argType));
printf(" (%s)", passedByRef ? "By ref" : "By value");
if (GetRegNum() != REG_STK)
{
printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s");
for (unsigned i = 0; i < numRegs; i++)
{
printf(" %s", getRegName(regNums[i]));
}
}
if (GetStackByteSize() > 0)
{
#if defined(DEBUG_ARG_SLOTS)
printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset);
#else
printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset);
#endif
}
printf(", byteAlignment=%u", m_byteAlignment);
if (isLateArg())
{
printf(", lateArgInx=%u", GetLateArgInx());
}
if (IsSplit())
{
printf(", isSplit");
}
if (needTmp)
{
printf(", tmpNum=V%02u", tmpNum);
}
if (needPlace)
{
printf(", needPlace");
}
if (isTmp)
{
printf(", isTmp");
}
if (processed)
{
printf(", processed");
}
if (IsHfaRegArg())
{
printf(", isHfa(%s)", varTypeName(GetHfaType()));
}
if (isBackFilled)
{
printf(", isBackFilled");
}
if (nonStandardArgKind != NonStandardArgKind::None)
{
printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind));
}
if (isStruct)
{
printf(", isStruct");
}
printf("]\n");
}
#endif
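//------------------------------------------------------------------------
// fgArgInfo: Construct an empty fgArgInfo for a call, sized for 'numArgs' arguments.
//
// Arguments:
//    comp    - the compiler instance
//    call    - the call this argument table describes
//    numArgs - the number of arguments; used to size the argument table
//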
fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs)
{
compiler = comp;
callTree = call;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = 0;
#if defined(UNIX_X86_ABI)
alignmentDone = false;
stkSizeBytes = 0;
padStkAlign = 0;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = 0;
#endif
argTableSize = numArgs; // the allocated table size
hasRegArgs = false;
hasStackArgs = false;
argsComplete = false;
argsSorted = false;
needsTemps = false;
if (argTableSize == 0)
{
argTable = nullptr;
}
else
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
}
}
/*****************************************************************************
*
* fgArgInfo Copy Constructor
*
* This method needs to act like a copy constructor for fgArgInfo.
* The newCall needs to have its fgArgInfo initialized such that
* we have newCall that is an exact copy of the oldCall.
* We have to take care since the argument information
* in the argTable contains pointers that must point to the
* new arguments and not the old arguments.
*/
fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall)
{
fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo;
compiler = oldArgInfo->compiler;
callTree = newCall;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = oldArgInfo->stkLevel;
#if defined(UNIX_X86_ABI)
alignmentDone = oldArgInfo->alignmentDone;
stkSizeBytes = oldArgInfo->stkSizeBytes;
padStkAlign = oldArgInfo->padStkAlign;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = oldArgInfo->outArgSize;
#endif
argTableSize = oldArgInfo->argTableSize;
argsComplete = false;
argTable = nullptr;
assert(oldArgInfo->argsComplete);
if (argTableSize > 0)
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
// Copy the old arg entries
for (unsigned i = 0; i < argTableSize; i++)
{
argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]);
}
// The copied arg entries contain pointers to old uses, they need
// to be updated to point to new uses.
if (newCall->gtCallThisArg != nullptr)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldCall->gtCallThisArg)
{
argTable[i]->use = newCall->gtCallThisArg;
break;
}
}
}
GenTreeCall::UseIterator newUse = newCall->Args().begin();
GenTreeCall::UseIterator newUseEnd = newCall->Args().end();
GenTreeCall::UseIterator oldUse = oldCall->Args().begin();
GenTreeCall::UseIterator oldUseEnd = oldCall->Args().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldUse.GetUse())
{
argTable[i]->use = newUse.GetUse();
break;
}
}
}
newUse = newCall->LateArgs().begin();
newUseEnd = newCall->LateArgs().end();
oldUse = oldCall->LateArgs().begin();
oldUseEnd = oldCall->LateArgs().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->lateUse == oldUse.GetUse())
{
argTable[i]->lateUse = newUse.GetUse();
break;
}
}
}
}
argCount = oldArgInfo->argCount;
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;)
nextStackByteOffset = oldArgInfo->nextStackByteOffset;
hasRegArgs = oldArgInfo->hasRegArgs;
hasStackArgs = oldArgInfo->hasStackArgs;
argsComplete = true;
argsSorted = true;
}
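//------------------------------------------------------------------------
// AddArg: Append the given fgArgTabEntry to the argument table.
//
// Arguments:
//    curArgTabEntry - the entry to append; the table must have room for it
//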
void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry)
{
assert(argCount < argTableSize);
argTable[argCount] = curArgTabEntry;
argCount++;
}
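//------------------------------------------------------------------------
// AddRegArg: Create an fgArgTabEntry for an argument passed (at least partially)
// in registers, and append it to the argument table.
//
// Arguments:
//    argNum        - the original argument number
//    node          - the argument node
//    use           - the use of the argument in the call's argument list
//    regNum        - the first register used to pass the argument
//    numRegs       - the number of registers used to pass the argument
//    byteSize      - the size of the argument in bytes
//    byteAlignment - the alignment of the argument in bytes
//    isStruct      - true if the argument is a struct
//    isFloatHfa    - true if the argument is a floating-point HFA
//    isVararg      - true if the argument belongs to a varargs call
//
// Return Value:
//    the newly created entry.
//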
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
// Any additional register numbers are set by the caller.
// This is primarily because on ARM we don't yet know if it
// will be split or if it is a double HFA, so the number of registers
// may actually be less.
curArgTabEntry->setRegNum(0, regNum);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
curArgTabEntry->numRegs = numRegs;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->slotNum = 0;
curArgTabEntry->numSlots = 0;
#endif
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(0);
hasRegArgs = true;
if (argCount >= argTableSize)
{
fgArgTabEntry** oldTable = argTable;
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1];
memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*));
argTableSize++;
}
AddArg(curArgTabEntry);
return curArgTabEntry;
}
#if defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr)
{
fgArgTabEntry* curArgTabEntry =
AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg);
assert(curArgTabEntry != nullptr);
curArgTabEntry->isStruct = isStruct; // is this a struct arg
curArgTabEntry->structIntRegs = structIntRegs;
curArgTabEntry->structFloatRegs = structFloatRegs;
INDEBUG(curArgTabEntry->checkIsStruct();)
assert(numRegs <= 2);
if (numRegs == 2)
{
curArgTabEntry->setRegNum(1, otherRegNum);
}
if (isStruct && structDescPtr != nullptr)
{
curArgTabEntry->structDesc.CopyFrom(*structDescPtr);
}
return curArgTabEntry;
}
#endif // defined(UNIX_AMD64_ABI)
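//------------------------------------------------------------------------
// AddStkArg: Create an fgArgTabEntry for a stack-passed argument, append it to
// the argument table, and advance the running stack offset.
//
// Arguments:
//    argNum        - the original argument number
//    node          - the argument node
//    use           - the use of the argument in the call's argument list
//    numSlots      - the number of stack slots used by the argument
//    byteSize      - the size of the argument in bytes
//    byteAlignment - the alignment of the argument in bytes
//    isStruct      - true if the argument is a struct
//    isFloatHfa    - true if the argument is a floating-point HFA
//    isVararg      - true if the argument belongs to a varargs call
//
// Return Value:
//    the newly created entry.
//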
fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE);
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment);
DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum);
curArgTabEntry->setRegNum(0, REG_STK);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->numSlots = numSlots;
curArgTabEntry->slotNum = nextSlotNum;
#endif
curArgTabEntry->numRegs = 0;
#if defined(UNIX_AMD64_ABI)
curArgTabEntry->structIntRegs = 0;
curArgTabEntry->structFloatRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(nextStackByteOffset);
hasStackArgs = true;
AddArg(curArgTabEntry);
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
nextStackByteOffset += curArgTabEntry->GetByteSize();
return curArgTabEntry;
}
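//------------------------------------------------------------------------
// RemorphReset: Reset the running stack slot and byte offset counters so that
// the argument table can be walked again when the call is remorphed.
//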
void fgArgInfo::RemorphReset()
{
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// UpdateRegArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be at least partially passed in registers.
//
void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
assert(curArgTabEntry->numRegs != 0);
assert(curArgTabEntry->use->GetNode() == node);
}
//------------------------------------------------------------------------
// UpdateStkArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be passed on the stack.
//
void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
noway_assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit());
assert(curArgTabEntry->use->GetNode() == node);
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE);
assert(curArgTabEntry->slotNum == nextSlotNum);
nextSlotNum += curArgTabEntry->numSlots;
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment());
assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset);
nextStackByteOffset += curArgTabEntry->GetStackByteSize();
}
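//------------------------------------------------------------------------
// SplitArg: Record that the given argument is split between registers and the
// stack, and advance the running stack offset accordingly.
//
// Arguments:
//    argNum   - the original argument number of the argument to split
//    numRegs  - the number of registers used for the first part of the argument
//    numSlots - the number of stack slots used for the remainder
//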
void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots)
{
fgArgTabEntry* curArgTabEntry = nullptr;
assert(argNum < argCount);
for (unsigned inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
{
break;
}
}
assert(numRegs > 0);
assert(numSlots > 0);
if (argsComplete)
{
assert(curArgTabEntry->IsSplit() == true);
assert(curArgTabEntry->numRegs == numRegs);
DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);)
assert(hasStackArgs == true);
}
else
{
curArgTabEntry->SetSplit(true);
curArgTabEntry->numRegs = numRegs;
DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;)
curArgTabEntry->SetByteOffset(0);
hasStackArgs = true;
}
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
// TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size.
nextStackByteOffset += numSlots * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// EvalToTmp: Replace the node in the given fgArgTabEntry with a temp
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry for the argument
// tmpNum - the varNum for the temp
// newNode - the assignment of the argument value to the temp
//
// Notes:
// Although the name of this method is EvalToTmp, it doesn't actually create
// the temp or the copy.
//
void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode)
{
assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert(curArgTabEntry->use->GetNode() == newNode);
assert(curArgTabEntry->GetNode() == newNode);
curArgTabEntry->tmpNum = tmpNum;
curArgTabEntry->isTmp = true;
}
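//------------------------------------------------------------------------
// ArgsComplete: Determine which arguments must be evaluated into temps or
// replaced with placeholders, now that all entries have been added to the
// argument table, and mark the table as complete.
//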
void fgArgInfo::ArgsComplete()
{
bool hasStructRegArg = false;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
if (curArgTabEntry->GetRegNum() == REG_STK)
{
assert(hasStackArgs == true);
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we use push instructions to pass arguments:
// The non-register arguments are evaluated and pushed in order
// and they are never evaluated into temps
//
continue;
#endif
}
#if FEATURE_ARG_SPLIT
else if (curArgTabEntry->IsSplit())
{
hasStructRegArg = true;
assert(hasStackArgs == true);
}
#endif // FEATURE_ARG_SPLIT
else // we have a register argument, next we look for a struct type.
{
if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
}
/* If the argument tree contains an assignment (GTF_ASG) then the argument
and every earlier argument (except constants) must be evaluated into temps
since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a"
-> when we see the second arg "a=5"
we know the first two arguments "a, a=5" have to be evaluated into temps
For the case of an assignment, we only know that there exists some assignment someplace
in the tree. We don't know what is being assigned so we are very conservative here
and assume that any local variable could have been assigned.
*/
if (argx->gtFlags & GTF_ASG)
{
// If this is not the only argument, or it's a copyblk, or it already evaluates the expression to
// a tmp, then we need a temp in the late arg list.
if ((argCount > 1) || argx->OperIsCopyBlkOp()
#ifdef FEATURE_FIXED_OUT_ARGS
|| curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property
// that we only have late non-register args when that feature is on.
#endif // FEATURE_FIXED_OUT_ARGS
)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// For all previous arguments, unless they are a simple constant
// we require that they be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
if (!prevArgTabEntry->GetNode()->IsInvariant())
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0);
#if FEATURE_FIXED_OUT_ARGS
// Like calls, if this argument has a tree that will do an inline throw,
// a call to a jit helper, then we need to treat it like a call (but only
// if there are/were any stack args).
// This means unnesting, sorting, etc. Technically this is overly
// conservative, but I want to avoid as much special-case debug-only code
// as possible, so leveraging the GTF_CALL flag is the easiest.
//
if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode &&
(compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT))
{
for (unsigned otherInx = 0; otherInx < argCount; otherInx++)
{
if (otherInx == curInx)
{
continue;
}
if (argTable[otherInx]->GetRegNum() == REG_STK)
{
treatLikeCall = true;
break;
}
}
}
#endif // FEATURE_FIXED_OUT_ARGS
/* If it contains a call (GTF_CALL) then itself and everything before the call
with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT
has to be kept in the right order since we will move the call to the first position)
For calls we don't have to be quite as conservative as we are with an assignment
since the call won't be modifying any non-address taken LclVars.
*/
if (treatLikeCall)
{
if (argCount > 1) // If this is not the only argument
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL))
{
// Spill all arguments that are floating point calls
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// All previous arguments may need to be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
// For all previous arguments, if they have any GTF_ALL_EFFECT
// we require that they be evaluated into a temp
if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0)
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
#if FEATURE_FIXED_OUT_ARGS
// Or, if they are stored into the FIXED_OUT_ARG area
// we require that they be moved to the gtCallLateArgs
// and replaced with a placeholder node
else if (prevArgTabEntry->GetRegNum() == REG_STK)
{
prevArgTabEntry->needPlace = true;
}
#if FEATURE_ARG_SPLIT
else if (prevArgTabEntry->IsSplit())
{
prevArgTabEntry->needPlace = true;
}
#endif // FEATURE_ARG_SPLIT
#endif
}
}
#if FEATURE_MULTIREG_ARGS
// For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST
// with multiple indirections, so here we consider spilling it into a tmp LclVar.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
bool isMultiRegArg =
(curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1);
#else
bool isMultiRegArg = (curArgTabEntry->numRegs > 1);
#endif
if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false))
{
if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0))
{
// Spill multireg struct arguments that have Assignments or Calls embedded in them
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else
{
// We call gtPrepareCost to measure the cost of evaluating this tree
compiler->gtPrepareCost(argx);
if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX)))
{
// Spill multireg struct arguments that are expensive to evaluate twice
curArgTabEntry->needTmp = true;
needsTemps = true;
}
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet()))
{
// SIMD types do not need the optimization below due to their sizes
if (argx->OperIsSimdOrHWintrinsic() ||
(argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) &&
argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic()))
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
#endif
#ifndef TARGET_ARM
// TODO-Arm: This optimization is not implemented for ARM32
// so we skip this for ARM32 until it is ported to use RyuJIT backend
//
else if (argx->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argx->AsObj();
unsigned structSize = argObj->GetLayout()->GetSize();
switch (structSize)
{
case 3:
case 5:
case 6:
case 7:
// If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
//
if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar?
{
// If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes
// For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
}
break;
case 11:
case 13:
case 14:
case 15:
// Spill any GT_OBJ multireg structs that are difficult to extract
//
// When we have a GT_OBJ of a struct with the above sizes we would need
// to use 3 or 4 load instructions to load the exact size of this struct.
// Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
// will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
// Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
// the argument.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
break;
default:
break;
}
}
#endif // !TARGET_ARM
}
}
#endif // FEATURE_MULTIREG_ARGS
}
// We only care because we can't spill structs and qmarks involve a lot of spilling, but
// if we don't have qmarks, then it doesn't matter.
// So check for Qmarks globally once here, instead of inside the loop.
//
const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed);
#if FEATURE_FIXED_OUT_ARGS
// For Arm/x64 we only care because we can't reorder a register
// argument that uses GT_LCLHEAP. This is an optimization to
// save a check inside the below loop.
//
const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed);
#else
const bool hasStackArgsWeCareAbout = hasStackArgs;
#endif // FEATURE_FIXED_OUT_ARGS
// If we have any stack args we have to force the evaluation
// of any arguments passed in registers that might throw an exception
//
// Technically we are only required to handle the following two cases:
// a GT_IND with GTF_IND_RNGCHK (only on x86) or
// a GT_LCLHEAP node that allocates stuff on the stack
//
if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout)
{
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
// Examine the register args that are currently not marked needTmp
//
if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK))
{
if (hasStackArgsWeCareAbout)
{
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we previously recorded a stack depth of zero when
// morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag
// Thus we can not reorder the argument after any stack based argument
// (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to
// check for it explicitly.)
//
if (argx->gtFlags & GTF_EXCEPT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
#else
// For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP
//
if (argx->gtFlags & GTF_EXCEPT)
{
assert(compiler->compLocallocUsed);
// Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
#endif
}
if (hasStructRegArgWeCareAbout)
{
// Returns true if a GT_QMARK node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
}
}
}
// When CFG is enabled and this is a delegate call or vtable call we must
// compute the call target before all late args. However this will
// effectively null-check 'this', which should happen only after all
// arguments are evaluated. Thus we must evaluate all args with side
// effects to a temp.
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke()))
{
// Always evaluate 'this' to temp.
argTable[0]->needTmp = true;
needsTemps = true;
for (unsigned curInx = 1; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
GenTree* arg = curArgTabEntry->GetNode();
if ((arg->gtFlags & GTF_ALL_EFFECT) != 0)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
argsComplete = true;
}
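//------------------------------------------------------------------------
// SortArgs: Reorder the argument table so that the more complex arguments
// (calls, temps, expensive trees) are evaluated before the simple ones
// (local vars, constants); see the comment below for the resulting layout.
//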
void fgArgInfo::SortArgs()
{
assert(argsComplete == true);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nSorting the arguments:\n");
}
#endif
/* Shuffle the arguments around before we build the gtCallLateArgs list.
The idea is to move all "simple" arguments like constants and local vars
to the end of the table, and move the complex arguments towards the beginning
of the table. This will help prevent registers from being spilled by
allowing us to evaluate the more complex arguments before the simpler arguments.
The argTable ends up looking like:
+------------------------------------+ <--- argTable[argCount - 1]
| constants |
+------------------------------------+
| local var / local field |
+------------------------------------+
| remaining arguments sorted by cost |
+------------------------------------+
| temps (argTable[].needTmp = true) |
+------------------------------------+
| args with calls (GTF_CALL) |
+------------------------------------+ <--- argTable[0]
*/
/* Set the beginning and end for the new argument table */
unsigned curInx;
int regCount = 0;
unsigned begTab = 0;
unsigned endTab = argCount - 1;
unsigned argsRemaining = argCount;
// First take care of arguments that are constants.
// [We use a backward iterator pattern]
//
curInx = argCount;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
regCount++;
}
assert(curArgTabEntry->lateUse == nullptr);
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put constants at the end of the table
//
if (argx->gtOper == GT_CNS_INT)
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > 0);
if (argsRemaining > 0)
{
// Next take care of arguments that are calls.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put calls at the beginning of the table
//
if (argx->gtFlags & GTF_CALL)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of arguments that are temps.
// These temps come before the arguments that are
// ordinary local vars or local fields
// since this will give them a better chance to become
// enregistered into their actual argument register.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
if (curArgTabEntry->needTmp)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of local var and local field arguments.
// These are moved towards the end of the argument evaluation.
// [We use a backward iterator pattern]
//
curInx = endTab + 1;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD))
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > begTab);
}
// Finally, take care of all the remaining arguments.
// Note that we fill in one arg at a time using a while loop.
bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop
while (argsRemaining > 0)
{
/* Find the most expensive arg remaining and evaluate it next */
fgArgTabEntry* expensiveArgTabEntry = nullptr;
unsigned expensiveArg = UINT_MAX;
unsigned expensiveArgCost = 0;
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// We should have already handled these kinds of args
assert(argx->gtOper != GT_LCL_VAR);
assert(argx->gtOper != GT_LCL_FLD);
assert(argx->gtOper != GT_CNS_INT);
// This arg should either have no persistent side effects or be the last one in our table
// assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1)));
if (argsRemaining == 1)
{
// This is the last arg to place
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
assert(begTab == endTab);
break;
}
else
{
if (!costsPrepared)
{
/* We call gtPrepareCost to measure the cost of evaluating this tree */
compiler->gtPrepareCost(argx);
}
if (argx->GetCostEx() > expensiveArgCost)
{
// Remember this arg as the most expensive one that we have yet seen
expensiveArgCost = argx->GetCostEx();
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
}
}
}
}
noway_assert(expensiveArg != UINT_MAX);
// put the most expensive arg towards the beginning of the table
expensiveArgTabEntry->processed = true;
// place expensiveArgTabEntry at the begTab position by performing a swap
//
if (expensiveArg != begTab)
{
argTable[expensiveArg] = argTable[begTab];
argTable[begTab] = expensiveArgTabEntry;
}
begTab++;
argsRemaining--;
costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop
}
// The table should now be completely filled and thus begTab should now be adjacent to endTab
// and argsRemaining should be zero
assert(begTab == (endTab + 1));
assert(argsRemaining == 0);
argsSorted = true;
}
#ifdef DEBUG
void fgArgInfo::Dump(Compiler* compiler) const
{
for (unsigned curInx = 0; curInx < ArgCount(); curInx++)
{
fgArgTabEntry* curArgEntry = ArgTable()[curInx];
curArgEntry->Dump();
}
}
#endif
//------------------------------------------------------------------------------
// fgMakeTmpArgNode : Create the node for an argument that has been evaluated into
//                    a temp, to be used in the late argument list. We need this to
//                    be done in order to enforce ordering of the evaluation of arguments.
//
// Arguments:
// curArgTabEntry
//
// Return Value:
// the newly created temp var tree.
GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry)
{
unsigned tmpVarNum = curArgTabEntry->tmpNum;
LclVarDsc* varDsc = lvaGetDesc(tmpVarNum);
assert(varDsc->lvIsTemp);
var_types type = varDsc->TypeGet();
// Create a copy of the temp to go into the late argument list
GenTree* arg = gtNewLclvNode(tmpVarNum, type);
GenTree* addrNode = nullptr;
if (varTypeIsStruct(type))
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
// Can this type be passed as a primitive type?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type.
bool passedAsPrimitive = false;
if (curArgTabEntry->TryPassAsPrimitive())
{
CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd();
var_types structBaseType =
getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg());
if (structBaseType != TYP_UNKNOWN)
{
passedAsPrimitive = true;
#if defined(UNIX_AMD64_ABI)
// TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry,
// and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take
// a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again.
//
if (genIsValidFloatReg(curArgTabEntry->GetRegNum()))
{
if (structBaseType == TYP_INT)
{
structBaseType = TYP_FLOAT;
}
else
{
assert(structBaseType == TYP_LONG);
structBaseType = TYP_DOUBLE;
}
}
#endif
type = structBaseType;
}
}
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
if (passedAsPrimitive)
{
arg->ChangeOper(GT_LCL_FLD);
arg->gtType = type;
lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
}
else
{
var_types addrType = TYP_BYREF;
arg = gtNewOperNode(GT_ADDR, addrType, arg);
lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS));
addrNode = arg;
#if FEATURE_MULTIREG_ARGS
#ifdef TARGET_ARM64
assert(varTypeIsStruct(type));
if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg()))
{
// We will create a GT_OBJ for the argument below.
// This will be passed by value in two registers.
assert(addrNode != nullptr);
// Create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
}
#else
// Always create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
#endif // !TARGET_ARM64
#endif // FEATURE_MULTIREG_ARGS
}
#else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM)
// other targets, we pass the struct by value
assert(varTypeIsStruct(type));
addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
// Get a new Obj node temp to use it as a call argument.
// gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode);
#endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM)
} // (varTypeIsStruct(type))
if (addrNode != nullptr)
{
assert(addrNode->gtOper == GT_ADDR);
// the child of a GT_ADDR is required to have this flag set
addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE;
}
return arg;
}
//------------------------------------------------------------------------------
// EvalArgsToTemps : Create temp assignments and populate the LateArgs list.
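//
// Notes:
//    Arguments marked needTmp are evaluated via an assignment to a temp in the
//    early argument list (unless they are already temps), and a node referencing
//    the temp is appended to gtCallLateArgs. Register arguments (and stack
//    arguments that need a placeholder) that do not need a temp are moved to
//    gtCallLateArgs and replaced with a GT_ARGPLACE node in the early list.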
void fgArgInfo::EvalArgsToTemps()
{
assert(argsSorted);
unsigned regArgInx = 0;
// Now go through the argument table and perform the necessary evaluation into temps
GenTreeCall::Use* tmpRegArgNext = nullptr;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry->lateUse == nullptr);
GenTree* argx = curArgTabEntry->GetNode();
GenTree* setupArg = nullptr;
GenTree* defArg;
#if !FEATURE_FIXED_OUT_ARGS
// Only ever set for FEATURE_FIXED_OUT_ARGS
assert(curArgTabEntry->needPlace == false);
// On x86 and other archs that use push instructions to pass arguments:
// Only the register arguments need to be replaced with placeholder nodes.
// Stacked arguments are evaluated and pushed (or stored into the stack) in order.
//
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
#endif
if (curArgTabEntry->needTmp)
{
if (curArgTabEntry->isTmp)
{
// Create a copy of the temp to go into the late argument list
defArg = compiler->fgMakeTmpArgNode(curArgTabEntry);
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
}
else
{
// Create a temp assignment for the argument
// Put the temp in the gtCallLateArgs list
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Argument with 'side effect'...\n");
compiler->gtDispTree(argx);
}
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
noway_assert(argx->gtType != TYP_STRUCT);
#endif
unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect"));
if (argx->gtOper == GT_MKREFANY)
{
// For GT_MKREFANY, typically the actual struct copying does
// not have any side-effects and can be delayed. So instead
// of using a temp for the whole struct, we can just use a temp
// for the operand that has a side-effect.
GenTree* operand;
if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp1;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp2;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
}
if (setupArg != nullptr)
{
// Now keep the mkrefany for the late argument list
defArg = argx;
// Clear the side-effect flags because now both op1 and op2 have no side-effects
defArg->gtFlags &= ~GTF_ALL_EFFECT;
}
else
{
setupArg = compiler->gtNewTempAssign(tmpVarNum, argx);
LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum);
var_types lclVarType = genActualType(argx->gtType);
var_types scalarType = TYP_UNKNOWN;
if (setupArg->OperIsCopyBlkOp())
{
setupArg = compiler->fgMorphCopyBlock(setupArg);
#if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI)
if (lclVarType == TYP_STRUCT)
{
// This scalar LclVar widening step is only performed for ARM architectures and the unix amd64 ABI.
//
CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum);
unsigned structSize = varDsc->lvExactSize;
scalarType =
compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg());
}
#endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI)
}
// scalarType can be set to a wider type for ARM or unix amd64 architectures:
// (3 => 4) or (5,6,7 => 8)
if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType))
{
// Create a GT_LCL_FLD using the wider type to go to the late argument list
defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0);
}
else
{
// Create a copy of the temp to go to the late argument list
defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType);
}
curArgTabEntry->isTmp = true;
curArgTabEntry->tmpNum = tmpVarNum;
#ifdef TARGET_ARM
// Previously we might have thought the local was promoted, and thus the 'COPYBLK'
// might have left holes in the used registers (see
// fgAddSkippedRegsInPromotedStructArg).
// Too bad we're not that smart for these intermediate temps...
if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1))
{
regNumber argReg = curArgTabEntry->GetRegNum();
regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum());
for (unsigned i = 1; i < curArgTabEntry->numRegs; i++)
{
argReg = genRegArgNext(argReg);
allUsedRegs |= genRegMask(argReg);
}
}
#endif // TARGET_ARM
}
/* mark the assignment as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n Evaluate to a temp:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
}
else // curArgTabEntry->needTmp == false
{
// On x86 -
// Only register args are replaced with placeholder nodes
// and the stack based arguments are evaluated and pushed in order.
//
// On Arm/x64 - When needTmp is false and needPlace is false,
// the non-register arguments are evaluated and stored in order.
// When needPlace is true we have a nested call that comes after
// this argument so we have to replace it in the gtCallArgs list
// (the initial argument evaluation list) with a placeholder.
//
if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false))
{
continue;
}
/* No temp needed - move the whole node to the gtCallLateArgs list */
/* The argument is deferred and put in the late argument list */
defArg = argx;
// Create a placeholder node to put in its place in gtCallLateArgs.
// For a struct type we also need to record the class handle of the arg.
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
// All structs are either passed (and retyped) as integral types, OR they
// are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT);
#else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)
if (defArg->TypeGet() == TYP_STRUCT)
{
clsHnd = compiler->gtGetStructHandleIfPresent(defArg);
noway_assert(clsHnd != NO_CLASS_HANDLE);
}
#endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI))
setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd);
/* mark the placeholder node as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
if (curArgTabEntry->GetRegNum() == REG_STK)
{
printf("Deferred stack argument :\n");
}
else
{
printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum()));
}
compiler->gtDispTree(argx);
printf("Replaced with placeholder node:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
if (setupArg != nullptr)
{
noway_assert(curArgTabEntry->use->GetNode() == argx);
curArgTabEntry->use->SetNode(setupArg);
}
/* deferred arg goes into the late argument list */
if (tmpRegArgNext == nullptr)
{
tmpRegArgNext = compiler->gtNewCallArgs(defArg);
callTree->AsCall()->gtCallLateArgs = tmpRegArgNext;
}
else
{
noway_assert(tmpRegArgNext->GetNode() != nullptr);
tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg));
tmpRegArgNext = tmpRegArgNext->GetNext();
}
curArgTabEntry->lateUse = tmpRegArgNext;
curArgTabEntry->SetLateArgInx(regArgInx++);
if ((setupArg != nullptr) && setupArg->OperIs(GT_ARGPLACE) && (callTree->gtRetBufArg == curArgTabEntry->use))
{
callTree->SetLclRetBufArg(tmpRegArgNext);
}
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nShuffled argument table: ");
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
printf("%s ", getRegName(curArgTabEntry->GetRegNum()));
}
}
printf("\n");
}
#endif
}
//------------------------------------------------------------------------------
// fgMakeMultiUse : If the node is an unaliased local or a constant, clone it;
// otherwise insert a comma form temp.
//
// Arguments:
// pOp - a pointer to the child node we will be replacing with the comma expression that
// evaluates *pOp to a temp and returns the result
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
// Notes:
// Caller must ensure that if the node is an unaliased local, the second use this
// creates will be evaluated before the local can be reassigned.
//
// Can be safely called in morph preorder, before GTF_GLOB_REF is reliable.
//
GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
{
GenTree* const tree = *pOp;
if (tree->IsInvariant())
{
return gtClone(tree);
}
else if (tree->IsLocal())
{
// Can't rely on GTF_GLOB_REF here.
//
if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed())
{
return gtClone(tree);
}
}
return fgInsertCommaFormTemp(pOp);
}
//------------------------------------------------------------------------------
// fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree,
// and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl)
//
// Arguments:
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
// structType - value type handle if the temp created is of TYP_STRUCT.
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
{
GenTree* subTree = *ppTree;
unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable"));
if (varTypeIsStruct(subTree))
{
assert(structType != nullptr);
lvaSetStruct(lclNum, structType, false);
}
// If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree.
// The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for
// setting the type of the lcl vars created.
GenTree* asg = gtNewTempAssign(lclNum, subTree);
GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load);
*ppTree = comma;
return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
}
//------------------------------------------------------------------------
// fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg
//
// Arguments:
// callNode - the call for which we are generating the fgArgInfo
//
// Return Value:
// None
//
// Notes:
// This method is idempotent: it checks whether the fgArgInfo has already been
// constructed and, if so, simply returns.
// This method only computes the arg table and arg entries for the call (the fgArgInfo),
// and makes no modification of the args themselves.
//
// The IR for the call args can change for calls with non-standard arguments: some non-standard
// arguments add new call argument IR nodes.
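//
// Each fgArgTabEntry created here records the placement decision for one argument: the
// register(s) or stack slot assigned, the size in bytes, the alignment, and whether it is
// a struct/HFA or a non-standard argument. fgMorphArgs later consumes this information
// when it morphs the argument trees themselves.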
//
void Compiler::fgInitArgInfo(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
unsigned argIndex = 0;
unsigned intArgRegNum = 0;
unsigned fltArgRegNum = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool callHasRetBuffArg = call->HasRetBufArg();
bool callIsVararg = call->IsVarargs();
#ifdef TARGET_ARM
regMaskTP argSkippedRegMask = RBM_NONE;
regMaskTP fltArgSkippedRegMask = RBM_NONE;
#endif // TARGET_ARM
#if defined(TARGET_X86)
unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated
#else
const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number
#endif
if (call->fgArgInfo != nullptr)
{
// We've already initialized and set the fgArgInfo.
return;
}
JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
// At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined.
assert(call->gtCallLateArgs == nullptr);
if (TargetOS::IsUnix && callIsVararg)
{
// Currently native varargs are not implemented on non-Windows targets.
//
// Note that some targets like Arm64 Unix should not need much work as
// the ABI is the same. Other targets may only need small changes,
// such as amd64 Unix, which just expects RAX to pass numFPArguments.
NYI("Morphing Vararg call not yet implemented on non Windows targets.");
}
// Data structure for keeping track of non-standard args. Non-standard args are those that are not passed
// following the normal calling convention or in the normal argument registers. We either mark existing
// arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the
// non-standard arguments into the argument list, below.
class NonStandardArgs
{
struct NonStandardArg
{
GenTree* node; // The tree node representing this non-standard argument.
// Note that this must be updated if the tree node changes due to morphing!
regNumber reg; // The register to be assigned to this non-standard argument.
NonStandardArgKind kind; // The kind of the non-standard arg
};
ArrayStack<NonStandardArg> args;
public:
NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments
{
}
//-----------------------------------------------------------------------------
// Add: add a non-standard argument to the table of non-standard arguments
//
// Arguments:
// node - a GenTree node that has a non-standard argument.
// reg - the register to assign to this node.
//
// Return Value:
// None.
//
void Add(GenTree* node, regNumber reg, NonStandardArgKind kind)
{
NonStandardArg nsa = {node, reg, kind};
args.Push(nsa);
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree* in the set of non-standard args.
//
// Arguments:
// node - a GenTree node to look for
//
// Return Value:
// The index of the non-standard argument (a non-negative, unique, stable number).
// If the node is not a non-standard argument, return -1.
//
int Find(GenTree* node)
{
for (int i = 0; i < args.Height(); i++)
{
if (node == args.Top(i).node)
{
return i;
}
}
return -1;
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree node in the non-standard arguments set. If found,
// set the register to use for the node.
//
// Arguments:
// node - a GenTree node to look for
// pReg - an OUT argument. *pReg is set to the non-standard register to use if
// 'node' is found in the non-standard argument set.
// pKind - an OUT argument. *pKind is set to the kind of the non-standard arg.
//
// Return Value:
// 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set.
// 'false' otherwise (in this case, *pReg and *pKind are unmodified).
//
bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind)
{
for (int i = 0; i < args.Height(); i++)
{
NonStandardArg& nsa = args.TopRef(i);
if (node == nsa.node)
{
*pReg = nsa.reg;
*pKind = nsa.kind;
return true;
}
}
return false;
}
//-----------------------------------------------------------------------------
// Replace: Replace the non-standard argument node at a given index. This is done when
// the original node was replaced via morphing, but we need to continue to assign a
// particular non-standard arg to it.
//
// Arguments:
// index - the index of the non-standard arg. It must exist.
// node - the new GenTree node.
//
// Return Value:
// None.
//
void Replace(int index, GenTree* node)
{
args.TopRef(index).node = node;
}
} nonStandardArgs(getAllocator(CMK_ArrayStack));
// Count of args. On first morph, this is counted before we've filled in the arg table.
// On remorph, we grab it from the arg table.
unsigned numArgs = 0;
// First we need to count the args
if (call->gtCallThisArg != nullptr)
{
numArgs++;
}
for (GenTreeCall::Use& use : call->Args())
{
numArgs++;
}
// Insert or mark non-standard args. These are either outside the normal calling convention, or
// are passed in argument registers that don't follow the normal progression of argument registers in the calling
// convention (such as for the ARM64 fixed return buffer argument x8).
//
// *********** NOTE *************
// The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments
// in the implementation of fast tail call.
// *********** END NOTE *********
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM)
// The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helper has a custom calling convention.
// Set the argument registers correctly here.
if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame);
}
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
#if defined(TARGET_ARM)
// A non-standard calling convention using wrapper delegate invoke is used on ARM only, for wrapper
// delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing
// R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs
// to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4
// correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub)
// to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details.
else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
GenTree* arg = call->gtCallThisArg->GetNode();
if (arg->OperIsLocal())
{
arg = gtClone(arg, true);
}
else
{
GenTree* tmp = fgInsertCommaFormTemp(&arg);
call->gtCallThisArg->SetNode(arg);
call->gtFlags |= GTF_ASG;
arg = tmp;
}
noway_assert(arg != nullptr);
GenTree* newArg = new (this, GT_ADDR)
GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell);
// Append newArg as the last arg
GenTreeCall::Use** insertionPoint = &call->gtCallArgs;
for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef()))
{
}
*insertionPoint = gtNewCallArgs(newArg);
numArgs++;
nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell);
}
#endif // defined(TARGET_ARM)
#if defined(TARGET_X86)
// The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the
// hi part to be in EDX. This sets the argument registers up correctly.
else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) ||
call->IsHelperCall(this, CORINFO_HELP_LRSZ))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow);
args = args->GetNext();
GenTree* arg2 = args->GetNode();
assert(arg2 != nullptr);
nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh);
}
#else // !TARGET_X86
// TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed.
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
// If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it.
//
// We don't use the fixed return buffer argument if we have the special unmanaged instance call convention.
// That convention doesn't use the fixed return buffer register.
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->HasFixedRetBufArg())
{
args = call->gtCallArgs;
assert(args != nullptr);
argx = call->gtCallArgs->GetNode();
// We don't increment numArgs here, since we already counted this argument above.
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer);
}
// We are allowed to have a Fixed Return Buffer argument combined
// with any of the remaining non-standard arguments
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->IsVirtualStub())
{
if (!call->IsTailCallViaJitHelper())
{
GenTree* stubAddrArg = fgGetStubAddrArg(call);
// And push the stub address onto the list of arguments
call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell);
}
else
{
// If it is a VSD call getting dispatched via tail call helper,
// fgMorphTailCallViaJitHelper() would materialize stub addr as an additional
// parameter added to the original arg list and hence no need to
// add as a non-standard arg.
}
}
else
#endif // !TARGET_X86
if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr))
{
assert(!call->IsUnmanaged());
GenTree* arg = call->gtCallCookie;
noway_assert(arg != nullptr);
call->gtCallCookie = nullptr;
// All architectures pass the cookie in a register.
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie);
numArgs++;
// put destination into R10/EAX
arg = gtClone(call->gtCallAddr, true);
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget);
// finally change this call to a helper call
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI);
}
#if defined(FEATURE_READYTORUN)
// For arm/arm64, we dispatch code the same way as VSD, using virtualStubParamInfo->GetReg()
// for the indirection cell address, which ZapIndirectHelperThunk expects.
// For x64/x86 we use the return address to get the indirection cell by disassembling the call site.
// That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch.
// Note that we call this before we know if something will be a fast tailcall or not.
// That's ok; after making something a tailcall, we will invalidate this information
// and reconstruct it if necessary. The tailcalling decision does not change since
// this is a non-standard arg in a register.
bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke();
#if defined(TARGET_XARCH)
needsIndirectionCell &= call->IsFastTailCall();
#endif
if (needsIndirectionCell)
{
assert(call->gtEntryPoint.addr != nullptr);
size_t addrValue = (size_t)call->gtEntryPoint.addr;
GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM);
#ifdef TARGET_ARM
// Issue #xxxx : Don't attempt to CSE this constant on ARM32
//
// This constant has specific register requirements, and LSRA doesn't currently correctly
// handle them when the value is in a CSE'd local.
indirectCellAddress->SetDoNotCSE();
#endif // TARGET_ARM
// Push the stub address onto the list of arguments.
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(),
NonStandardArgKind::R2RIndirectionCell);
}
#endif
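// If the helper that validates indirect call targets expects the target address in a register
// other than the first argument register, record that first arg as a non-standard arg so it is
// placed in REG_VALIDATE_INDIRECT_CALL_ADDR.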
if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
assert(call->gtCallArgs != nullptr);
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* tar = args->GetNode();
nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget);
}
// Allocate the fgArgInfo for the call node;
//
call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs);
// Add the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
assert(argIndex == 0);
assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT);
assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL));
const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum);
const unsigned numRegs = 1;
const unsigned byteSize = TARGET_POINTER_SIZE;
const unsigned byteAlignment = TARGET_POINTER_SIZE;
const bool isStruct = false;
const bool isFloatHfa = false;
// This is a register argument - put it in the table.
call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment,
isStruct, isFloatHfa,
callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0)
UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr));
intArgRegNum++;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
// we skip the corresponding floating point register argument
fltArgRegNum++;
#endif // WINDOWS_AMD64_ABI
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
#ifdef TARGET_X86
// Compute the maximum number of arguments that can be passed in registers.
// For X86 we handle the varargs and unmanaged calling conventions
#ifndef UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_POP_ARGS)
{
noway_assert(intArgRegNum < MAX_REG_ARG);
// No more register arguments for varargs (CALL_POP_ARGS)
maxRegArgs = intArgRegNum;
// Add in the ret buff arg
if (callHasRetBuffArg)
maxRegArgs++;
}
#endif // UNIX_X86_ABI
if (call->IsUnmanaged())
{
noway_assert(intArgRegNum == 0);
if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL ||
call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF ||
call->gtCallArgs->GetNode()->gtOper ==
GT_NOP); // the arg was already morphed to a register (fgMorph called twice)
maxRegArgs = 1;
}
else
{
maxRegArgs = 0;
}
#ifdef UNIX_X86_ABI
// Add in the ret buff arg
if (callHasRetBuffArg &&
call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not
call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments.
maxRegArgs++;
#endif
}
#endif // TARGET_X86
/* Morph the user arguments */
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
// The ARM ABI has a concept of back-filling of floating-point argument registers, according
// to the "Procedure Call Standard for the ARM Architecture" document, especially
// section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can
// appear in a lower-numbered register than floating point argument N. That is, argument
// register allocation is not strictly increasing. To support this, we need to keep track of unused
// floating-point argument registers that we can back-fill. We only support 4-byte float and
// 8-byte double types, and one to four element HFAs composed of these types. With this, we will
// only back-fill single registers, since there is no way with these types to create
// an alignment hole greater than one register. However, there can be up to 3 back-fill slots
// available (with 16 FP argument registers). Consider this code:
//
// struct HFA { float x, y, z; }; // a three element HFA
// void bar(float a1, // passed in f0
// double a2, // passed in f2/f3; skip f1 for alignment
// HFA a3, // passed in f4/f5/f6
// double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot
// HFA a5, // passed in f10/f11/f12
// double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill
// // slots
// float a7, // passed in f1 (back-filled)
// float a8, // passed in f7 (back-filled)
// float a9, // passed in f13 (back-filled)
// float a10) // passed on the stack in [OutArg+0]
//
// Note that if we ever support FP types with larger alignment requirements, then there could
// be more than single register back-fills.
//
// Once we assign a floating-point argument to the stack, all subsequent ones must go on the stack.
// See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling
// continues only so long as no VFP CPRC has been allocated to a slot on the stack."
// We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack
// and prevent any additional floating-point arguments from going in registers.
bool anyFloatStackArgs = false;
#endif // TARGET_ARM
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG)
// Check that we have valid information about call's argument types.
// For example:
// load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte));
// load int; call(byte) -> CALL(PUTARG_TYPE int (IND int));
// etc.
if (call->callSig != nullptr)
{
CORINFO_SIG_INFO* sig = call->callSig;
const unsigned sigArgsCount = sig->numArgs;
GenTreeCall::Use* nodeArgs = call->gtCallArgs;
// It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie
// etc.
unsigned nodeArgsCount = 0;
call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult {
nodeArgsCount++;
return GenTree::VisitResult::Continue;
});
if (call->gtCallThisArg != nullptr)
{
// Handle the most common argument that is not counted in `sig->numArgs`,
// so the following check works on more methods.
nodeArgsCount--;
}
assert(nodeArgsCount >= sigArgsCount);
if ((nodeArgsCount == sigArgsCount) &&
((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1)))
{
CORINFO_ARG_LIST_HANDLE sigArg = sig->args;
for (unsigned i = 0; i < sig->numArgs; ++i)
{
CORINFO_CLASS_HANDLE argClass;
const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass));
const var_types sigType = JITtype2varType(corType);
assert(nodeArgs != nullptr);
const GenTree* nodeArg = nodeArgs->GetNode();
assert(nodeArg != nullptr);
const var_types nodeType = nodeArg->TypeGet();
assert((nodeType == sigType) || varTypeIsStruct(sigType) ||
genTypeSize(nodeType) == genTypeSize(sigType));
sigArg = info.compCompHnd->getArgNext(sigArg);
nodeArgs = nodeArgs->GetNext();
}
assert(nodeArgs == nullptr);
}
}
#endif // DEBUG
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
argx = args->GetNode()->gtSkipPutArgType();
// Change the node to TYP_I_IMPL so we don't report GC info
// NOTE: We deferred this from the importer because of the inliner.
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// We should never have any ArgPlaceHolder nodes at this point.
assert(!argx->IsArgPlaceHolderNode());
// Setup any HFA information about 'argx'
bool isHfaArg = false;
var_types hfaType = TYP_UNDEF;
unsigned hfaSlots = 0;
bool passUsingFloatRegs;
unsigned argAlignBytes = TARGET_POINTER_SIZE;
unsigned size = 0;
unsigned byteSize = 0;
if (GlobalJitOptions::compFeatureHfa)
{
hfaType = GetHfaType(argx);
isHfaArg = varTypeIsValidHfaType(hfaType);
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
// Make sure for vararg methods isHfaArg is not true.
isHfaArg = callIsVararg ? false : isHfaArg;
}
#endif // defined(TARGET_ARM64)
if (isHfaArg)
{
isHfaArg = true;
hfaSlots = GetHfaCount(argx);
// If we have an HFA struct it's possible we transition from a method that originally
// only had integer types to one that now has FP types. We have to communicate this
// through this flag since LSRA later on will use it to determine whether
// or not to track the FP register set.
//
compFloatingPointUsed = true;
}
}
const bool isFloatHfa = (hfaType == TYP_FLOAT);
#ifdef TARGET_ARM
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP;
bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG);
// We don't use the "size" return value from InferOpSizeAlign().
codeGen->InferOpSizeAlign(argx, &argAlignBytes);
argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE);
if (argAlignBytes == 2 * TARGET_POINTER_SIZE)
{
if (passUsingFloatRegs)
{
if (fltArgRegNum % 2 == 1)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
fltArgRegNum++;
}
}
else if (passUsingIntRegs)
{
if (intArgRegNum % 2 == 1)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
intArgRegNum++;
}
}
#if defined(DEBUG)
if (argSlots % 2 == 1)
{
argSlots++;
}
#endif
}
#elif defined(TARGET_ARM64)
assert(!callIsVararg || !isHfaArg);
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx));
#elif defined(TARGET_AMD64)
passUsingFloatRegs = varTypeIsFloating(argx);
#elif defined(TARGET_X86)
passUsingFloatRegs = false;
#else
#error Unsupported or unset target architecture
#endif // TARGET*
bool isBackFilled = false;
unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
var_types structBaseType = TYP_STRUCT;
unsigned structSize = 0;
bool passStructByRef = false;
bool isStructArg;
GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */);
//
// Figure out the size of the argument. This is either in number of registers, or number of
// TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and
// the stack.
//
isStructArg = varTypeIsStruct(argx);
CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
if (isStructArg)
{
objClass = gtGetStructHandle(argx);
if (argx->TypeGet() == TYP_STRUCT)
{
// For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY
switch (actualArg->OperGet())
{
case GT_OBJ:
structSize = actualArg->AsObj()->GetLayout()->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
break;
case GT_LCL_VAR:
structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize;
break;
case GT_MKREFANY:
structSize = info.compCompHnd->getClassSize(objClass);
break;
default:
BADCODE("illegal argument tree in fgInitArgInfo");
break;
}
}
else
{
structSize = genTypeSize(argx);
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
}
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
else
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
}
#else // !UNIX_AMD64_ABI
size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot'
if (!isStructArg)
{
byteSize = genTypeSize(argx);
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64)
if (isStructArg)
{
if (isHfaArg)
{
// HFA structs are passed by value in multiple registers.
// The "size" in registers may differ the size in pointer-sized units.
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx);
size = GetHfaCount(structHnd);
byteSize = info.compCompHnd->getClassSize(structHnd);
}
else
{
// Structs are either passed in 1 or 2 (64-bit) slots.
// Structs that are the size of 2 pointers are passed by value in multiple registers,
// if sufficient registers are available.
// Structs that are larger than 2 pointers (except for HFAs) are passed by
// reference (to a copy)
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
if (size > 2)
{
size = 1;
}
}
// Note that there are some additional rules for multireg structs.
// (i.e they cannot be split between registers and the stack)
}
else
{
size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
#elif defined(TARGET_ARM) || defined(TARGET_X86)
if (isStructArg)
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
}
else
{
// The typical case.
// Long/double type argument(s) will be modified as needed in Lowering.
size = genTypeStSz(argx->gtType);
byteSize = genTypeSize(argx);
}
#else
#error Unsupported or unset target architecture
#endif // TARGET_XXX
if (isStructArg)
{
assert(argx == args->GetNode());
assert(structSize != 0);
structPassingKind howToPassStruct;
structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize);
passStructByRef = (howToPassStruct == SPK_ByReference);
if (howToPassStruct == SPK_ByReference)
{
byteSize = TARGET_POINTER_SIZE;
}
else
{
byteSize = structSize;
}
if (howToPassStruct == SPK_PrimitiveType)
{
#ifdef TARGET_ARM
// TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct,
// or for a struct of two floats. This causes the struct to be address-taken.
if (structBaseType == TYP_DOUBLE)
{
size = 2;
}
else
#endif // TARGET_ARM
{
size = 1;
}
}
else if (passStructByRef)
{
size = 1;
}
}
const var_types argType = args->GetNode()->TypeGet();
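// If the arg is wrapped in a GT_PUTARG_TYPE node, size (and, on Apple arm64, align) the
// argument from the wrapper's type rather than from the underlying node.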
if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
{
byteSize = genTypeSize(argType);
}
// The 'size' value must have been set by now. (The original value of zero is an invalid value.)
assert(size != 0);
assert(byteSize != 0);
if (compMacOsArm64Abi())
{
// Arm64 Apple has a special ABI for passing small size arguments on stack,
// bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc.
// It means passing 8 1-byte arguments on stack can take as small as 8 bytes.
argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa);
}
//
// Figure out if the argument will be passed in a register.
//
bool isRegArg = false;
NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None;
regNumber nonStdRegNum = REG_NA;
if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
#elif defined(TARGET_X86)
|| (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
)
{
#ifdef TARGET_ARM
if (passUsingFloatRegs)
{
// First, see if it can be back-filled
if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
(fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
(size == 1)) // The size to back-fill is one float register
{
// Back-fill the register.
isBackFilled = true;
regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
fltArgSkippedRegMask &=
~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
}
// Does the entire float, double, or HFA fit in the FP arg registers?
// Check if the last register needed is still in the argument register range.
isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG;
if (!isRegArg)
{
anyFloatStackArgs = true;
}
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
#elif defined(TARGET_ARM64)
if (passUsingFloatRegs)
{
// Check if the last register needed is still in the fp argument register range.
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;
// Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers?
if (isHfaArg && !isRegArg)
{
// recompute the 'size' so that it represents the number of stack slots rather than the number of
// registers
//
unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
size = roundupSize / TARGET_POINTER_SIZE;
// We also must update fltArgRegNum so that we no longer try to
// allocate any new floating point registers for args
// This prevents us from backfilling a subsequent arg into d7
//
fltArgRegNum = MAX_FLOAT_REG_ARG;
}
}
else
{
// Check if the last register needed is still in the int argument register range.
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
// Did we run out of registers when we had a 16-byte struct (size == 2)?
// (i.e we only have one register remaining but we needed two registers to pass this arg)
// This prevents us from backfilling a subsequent arg into x7
//
if (!isRegArg && (size > 1))
{
// Arm64 windows native varargs allows splitting a 16 byte struct between stack
// and the last general purpose register.
if (TargetOS::IsWindows && callIsVararg)
{
// Override the decision and force a split.
isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs;
}
else
{
// We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
intArgRegNum = maxRegArgs;
}
}
}
#else // not TARGET_ARM or TARGET_ARM64
#if defined(UNIX_AMD64_ABI)
// Here a struct can be passed in register following the classifications of its members and size.
// Now make sure there are actually enough registers to do so.
if (isStructArg)
{
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
structIntRegs++;
}
else if (structDesc.IsSseSlot(i))
{
structFloatRegs++;
}
}
isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) &&
((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
}
else
{
if (passUsingFloatRegs)
{
isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG;
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
}
#else // !defined(UNIX_AMD64_ABI)
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
#endif // !defined(UNIX_AMD64_ABI)
#endif // TARGET_ARM
}
else
{
isRegArg = false;
}
// If there are nonstandard args (outside the calling convention), they were inserted above
// and noted in a table so we can recognize them here and build their argInfo.
//
// They should not affect the placement of any other args or stack space required.
// Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls.
bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind);
if (isNonStandard)
{
isRegArg = (nonStdRegNum != REG_STK);
}
else if (call->IsTailCallViaJitHelper())
{
// We have already (before calling fgMorphArgs()) appended the 4 special args
// required by the x86 tailcall helper. These args are required to go on the
// stack. Force them to the stack here.
assert(numArgs >= 4);
if (argIndex >= numArgs - 4)
{
isRegArg = false;
}
}
// Now we know if the argument goes in registers or not and how big it is.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
// If we ever allocate a floating point argument to the stack, then all
// subsequent HFA/float/double arguments go on the stack.
if (!isRegArg && passUsingFloatRegs)
{
for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
}
}
// If we think we're going to split a struct between integer registers and the stack, check to
// see if we've already assigned a floating-point arg to the stack.
if (isRegArg && // We decided above to use a register for the argument
!passUsingFloatRegs && // We're using integer registers
(intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack
anyFloatStackArgs) // We've already used the stack for a floating-point argument
{
isRegArg = false; // Change our mind; don't pass this struct partially in registers
// Skip the rest of the integer argument registers
for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
}
}
#endif // TARGET_ARM
// Now create the fgArgTabEntry.
fgArgTabEntry* newArgEntry;
if (isRegArg)
{
regNumber nextRegNum = REG_STK;
#if defined(UNIX_AMD64_ABI)
regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
if (isNonStandard)
{
nextRegNum = nonStdRegNum;
}
#if defined(UNIX_AMD64_ABI)
else if (isStructArg && structDesc.passedInRegisters)
{
// It is a struct passed in registers. Assign the next available register.
assert((structDesc.eightByteCount <= 2) && "Too many eightbytes.");
regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum};
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
*nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs);
++structIntRegs;
}
else if (structDesc.IsSseSlot(i))
{
*nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs);
++structFloatRegs;
}
}
}
#endif // defined(UNIX_AMD64_ABI)
else
{
// fill in or update the argInfo table
nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
: genMapIntRegArgNumToRegNum(intArgRegNum);
}
#ifdef TARGET_AMD64
#ifndef UNIX_AMD64_ABI
assert(size == 1);
#endif
#endif
// This is a register argument - put it in the table
newArgEntry =
call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum)
UNIX_AMD64_ABI_ONLY_ARG(structIntRegs)
UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs)
UNIX_AMD64_ABI_ONLY_ARG(&structDesc));
newArgEntry->SetIsBackFilled(isBackFilled);
// Set up the next intArgRegNum and fltArgRegNum values.
if (!isBackFilled)
{
#if defined(UNIX_AMD64_ABI)
if (isStructArg)
{
// For this case, we've already set the regNums in the argTabEntry
intArgRegNum += structIntRegs;
fltArgRegNum += structFloatRegs;
}
else
#endif // defined(UNIX_AMD64_ABI)
{
if (!isNonStandard)
{
#if FEATURE_ARG_SPLIT
// Check for a split (partially enregistered) struct
if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG))
{
// This indicates a partial enregistration of a struct type
assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() ||
(argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG)));
unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum;
assert((unsigned char)numRegsPartial == numRegsPartial);
call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial);
}
#endif // FEATURE_ARG_SPLIT
if (passUsingFloatRegs)
{
fltArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass a floating point register argument
// we skip the corresponding integer register argument
intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG);
#endif // WINDOWS_AMD64_ABI
// No supported architecture supports partial structs using float registers.
assert(fltArgRegNum <= MAX_FLOAT_REG_ARG);
}
else
{
// Increment intArgRegNum by 'size' registers
intArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG);
#endif // WINDOWS_AMD64_ABI
}
}
}
}
}
else // We have an argument that is not passed in a register
{
// This is a stack argument - put it in the table
newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg);
#ifdef UNIX_AMD64_ABI
// TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs).
if (structDesc.passedInRegisters)
{
newArgEntry->structDesc.CopyFrom(structDesc);
}
#endif
}
newArgEntry->nonStandardArgKind = nonStandardArgKind;
if (GlobalJitOptions::compFeatureHfa)
{
if (isHfaArg)
{
newArgEntry->SetHfaType(hfaType, hfaSlots);
}
}
newArgEntry->SetMultiRegNums();
noway_assert(newArgEntry != nullptr);
if (newArgEntry->isStruct)
{
newArgEntry->passedByRef = passStructByRef;
newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType;
}
else
{
newArgEntry->argType = argx->TypeGet();
}
DEBUG_ARG_SLOTS_ONLY(argSlots += size;)
} // end foreach argument loop
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
}
//------------------------------------------------------------------------
// fgMorphArgs: Walk and transform (morph) the arguments of a call
//
// Arguments:
// callNode - the call for which we are doing the argument morphing
//
// Return Value:
// Like most morph methods, this method returns the morphed node,
// though in this case there are currently no scenarios where the
// node itself is re-created.
//
// Notes:
// This calls fgInitArgInfo to create the 'fgArgInfo' for the call.
// If it has already been created, that method will simply return.
//
// This method changes the state of the call node. It uses the existence
// of gtCallLateArgs (the late arguments list) to determine if it has
// already done the first round of morphing.
//
// The first time it is called (i.e. during global morphing), this method
// computes the "late arguments". This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, are doing the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, it and earlier
// arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
GenTreeFlags flagsSummary = GTF_EMPTY;
unsigned argIndex = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool reMorphing = call->AreArgsComplete();
// Set up the fgArgInfo.
fgInitArgInfo(call);
JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));
// If we are remorphing, process the late arguments (which were determined by a previous caller).
if (reMorphing)
{
for (GenTreeCall::Use& use : call->LateArgs())
{
use.SetNode(fgMorphTree(use.GetNode()));
flagsSummary |= use.GetNode()->gtFlags;
}
assert(call->fgArgInfo != nullptr);
}
call->fgArgInfo->RemorphReset();
// First we morph the argument subtrees ('this' pointer, arguments, etc.).
// During the first call to fgMorphArgs we also record the
// information about late arguments we have in 'fgArgInfo'.
// This information is used later to construct the gtCallLateArgs.
// Process the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
argx = fgMorphTree(argx);
call->gtCallThisArg->SetNode(argx);
// This is a register argument - possibly update it in the table.
call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
{
if (!argx->OperIsLocal())
{
thisArgEntry->needTmp = true;
call->fgArgInfo->SetNeedsTemps();
}
}
assert(argIndex == 0);
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
// Note that this name is a bit of a misnomer - it indicates that there are struct args
// that occupy more than a single slot that are passed by value (not necessarily in regs).
bool hasMultiregStructArgs = false;
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
GenTree** parentArgx = &args->NodeRef();
fgArgTabEntry* argEntry = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);
// Morph the arg node, and update the parent and argEntry pointers.
argx = *parentArgx;
argx = fgMorphTree(argx);
*parentArgx = argx;
assert(argx == args->GetNode());
DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();)
CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE)
{
if (argSlots % 2 == 1)
{
argSlots++;
}
}
}
#endif // DEBUG_ARG_SLOTS
if (argEntry->isNonStandard() && argEntry->isPassedInRegisters())
{
// We need to update the node field for this nonStandard arg here
// as it may have been changed by the call to fgMorphTree.
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
continue;
}
DEBUG_ARG_SLOTS_ASSERT(size != 0);
DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();)
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// Get information about this argument.
var_types hfaType = argEntry->GetHfaType();
bool isHfaArg = (hfaType != TYP_UNDEF);
bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters();
unsigned structSize = 0;
// Struct arguments may be morphed into a node that is not a struct type.
// In such case the fgArgTabEntry keeps track of whether the original node (before morphing)
// was a struct and the struct classification.
bool isStructArg = argEntry->isStruct;
GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/);
if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE))
{
CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj);
unsigned originalSize;
if (argObj->TypeGet() == TYP_STRUCT)
{
if (argObj->OperIs(GT_OBJ))
{
// Get the size off the OBJ node.
originalSize = argObj->AsObj()->GetLayout()->GetSize();
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
else
{
// We have a BADCODE assert for this in fgInitArgInfo.
assert(argObj->OperIs(GT_LCL_VAR));
originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize;
}
}
else
{
originalSize = genTypeSize(argx);
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
var_types structBaseType = argEntry->argType;
// First, handle the case where the argument is passed by reference.
if (argEntry->passedByRef)
{
DEBUG_ARG_SLOTS_ASSERT(size == 1);
copyBlkClass = objClass;
#ifdef UNIX_AMD64_ABI
assert(!"Structs are not passed by reference on x64/ux");
#endif // UNIX_AMD64_ABI
}
else // This is passed by value.
{
// Check to see if we can transform this into load of a primitive type.
// 'size' must be the number of pointer sized items
DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE);
structSize = originalSize;
unsigned passingSize = originalSize;
// Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size.
// When it can do this is platform-dependent:
// - In general, it can be done for power of 2 structs that fit in a single register.
// - For ARM and ARM64 it must also be a non-HFA struct, or have a single field.
// - This is irrelevant for X86, since structs are always passed by value on the stack.
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj);
bool canTransform = false;
if (structBaseType != TYP_STRUCT)
{
if (isPow2(passingSize))
{
canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType())));
}
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
// For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can
// only transform in that case if the arg is a local.
// TODO-CQ: This transformation should be applicable in general, not just for the ARM64
// or UNIX_AMD64_ABI cases where they will be passed in registers.
else
{
canTransform = (lclVar != nullptr);
passingSize = genTypeSize(structBaseType);
}
#endif // TARGET_ARM64 || UNIX_AMD64_ABI
}
if (!canTransform)
{
#if defined(TARGET_AMD64)
#ifndef UNIX_AMD64_ABI
// On Windows structs are always copied and passed by reference (handled above) unless they are
// passed by value in a single register.
assert(size == 1);
copyBlkClass = objClass;
#else // UNIX_AMD64_ABI
// On Unix, structs are always passed by value.
// We only need a copy if we have one of the following:
// - The sizes don't match for a non-lclVar argument.
// - We have a known struct type (e.g. SIMD) that requires multiple registers.
// TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not
// actually passed in registers.
if (argEntry->isPassedInRegisters())
{
if (argObj->OperIs(GT_OBJ))
{
if (passingSize != structSize)
{
copyBlkClass = objClass;
}
}
else if (lclVar == nullptr)
{
// This should only be the case of a value directly producing a known struct type.
assert(argObj->TypeGet() != TYP_STRUCT);
if (argEntry->numRegs > 1)
{
copyBlkClass = objClass;
}
}
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64)
if ((passingSize != structSize) && (lclVar == nullptr))
{
copyBlkClass = objClass;
}
#endif
#ifdef TARGET_ARM
// TODO-1stClassStructs: Unify these conditions across targets.
if (((lclVar != nullptr) &&
(lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) ||
((argObj->OperIs(GT_OBJ)) && (passingSize != structSize)))
{
copyBlkClass = objClass;
}
if (structSize < TARGET_POINTER_SIZE)
{
copyBlkClass = objClass;
}
#endif // TARGET_ARM
}
else
{
// We have a struct argument that fits into a register, and it is either a power of 2,
// or a local.
// Change our argument, as needed, into a value of the appropriate type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2)));
#else
DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) &&
size == (genTypeSize(structBaseType) / REGSIZE_BYTES)));
#endif
assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize));
if (argObj->OperIs(GT_OBJ))
{
argObj->ChangeOper(GT_IND);
// Now see if we can fold *(&X) into X
if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR)
{
GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1;
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE);
DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(argObj); // GT_IND
argObj = temp;
*parentArgx = temp;
argx = temp;
}
}
if (argObj->gtOper == GT_LCL_VAR)
{
unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
if (varDsc->lvFieldCnt == 1)
{
// get the first and only promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize)
{
// we will use the first and only promoted field
argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart);
if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) &&
(genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
{
// Just use the existing field's type
argObj->gtType = fieldVarDsc->TypeGet();
}
else
{
// Can't use the existing field's type, so use GT_LCL_FLD to swizzle
// to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()));
assert(copyBlkClass == NO_CLASS_HANDLE);
}
else
{
// use GT_LCL_FLD to swizzle the single field struct to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// The struct fits into a single register, but it has been promoted into its
// constituent fields, and so we have to re-assemble it
copyBlkClass = objClass;
}
}
else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType))
{
// Not a promoted struct, so just swizzle the type by using GT_LCL_FLD
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// Not a GT_LCL_VAR, so we can just change the type on the node
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()) ||
((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType)));
}
#if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH)
// TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in
// `genPutStructArgStk` for xarch like we did it for Arm/Arm64.
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if (isHfaArg && passUsingFloatRegs)
{
}
else if (structBaseType == TYP_STRUCT)
{
// If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
// we must copyblk to a temp before doing the obj to avoid
// the obj reading memory past the end of the valuetype
CLANG_FORMAT_COMMENT_ANCHOR;
if (roundupSize > originalSize)
{
copyBlkClass = objClass;
// There are a few special cases where we can omit using a CopyBlk
// where we normally would need to use one.
if (argObj->OperIs(GT_OBJ) &&
argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar?
{
copyBlkClass = NO_CLASS_HANDLE;
}
}
}
#endif // !UNIX_AMD64_ABI
}
}
if (argEntry->isPassedInRegisters())
{
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
}
else
{
call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing);
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
fgMakeOutgoingStructArgCopy(call, args, copyBlkClass);
}
if (argx->gtOper == GT_MKREFANY)
{
// 'Lower' the MKREFANY tree and insert it.
noway_assert(!reMorphing);
#ifdef TARGET_X86
// Build the mkrefany as a GT_FIELD_LIST
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF);
fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
#else // !TARGET_X86
// Get a new temp
// Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany
unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument"));
lvaSetStruct(tmp, impGetRefAnyClass(), false);
// Build the mkrefany as a comma node:
// (tmp.ptr=argx),(tmp.type=handle)
GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr);
GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type);
destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
destPtrSlot->gtFlags |= GTF_VAR_DEF;
destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()));
destTypeSlot->gtFlags |= GTF_VAR_DEF;
GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1);
GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2);
GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot);
// Change the expression to "(tmp=val)"
args->SetNode(asg);
// EvalArgsToTemps will cause tmp to actually get loaded as the argument
call->fgArgInfo->EvalToTmp(argEntry, tmp, asg);
lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE));
#endif // !TARGET_X86
}
#if FEATURE_MULTIREG_ARGS
if (isStructArg)
{
if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) ||
(isHfaArg && argx->TypeGet() == TYP_STRUCT))
{
hasMultiregStructArgs = true;
}
}
#ifdef TARGET_ARM
else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE))
{
assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2));
}
#endif
else
{
// We must have exactly one register or slot.
assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) ||
((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1)));
}
#endif
#if defined(TARGET_X86)
if (isStructArg)
{
GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx);
if ((lclNode != nullptr) &&
(lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// Make a GT_FIELD_LIST of the field lclVars.
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon();
LclVarDsc* varDsc = lvaGetDesc(lcl);
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
for (unsigned fieldLclNum = varDsc->lvFieldLclStart;
fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* fieldLcl;
if (fieldLclNum == varDsc->lvFieldLclStart)
{
lcl->SetLclNum(fieldLclNum);
lcl->SetOperResetFlags(GT_LCL_VAR);
lcl->gtType = fieldVarDsc->TypeGet();
fieldLcl = lcl;
}
else
{
fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
}
fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
}
}
}
#endif // TARGET_X86
flagsSummary |= args->GetNode()->gtFlags;
} // end foreach argument loop
if (!reMorphing)
{
call->fgArgInfo->ArgsComplete();
}
/* Process the function address, if indirect call */
if (call->gtCallType == CT_INDIRECT)
{
call->gtCallAddr = fgMorphTree(call->gtCallAddr);
// Const CSE may create an assignment node here
flagsSummary |= call->gtCallAddr->gtFlags;
}
#if FEATURE_FIXED_OUT_ARGS
// Record the outgoing argument size. If the call is a fast tail
// call, it will setup its arguments in incoming arg area instead
// of the out-going arg area, so we don't need to track the
// outgoing arg size.
if (!call->IsFastTailCall())
{
#if defined(UNIX_AMD64_ABI)
// This is currently required for the UNIX ABI to work correctly.
opts.compNeedToAlignFrame = true;
#endif // UNIX_AMD64_ABI
const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset());
#if defined(DEBUG_ARG_SLOTS)
unsigned preallocatedArgCount = 0;
if (!compMacOsArm64Abi())
{
preallocatedArgCount = call->fgArgInfo->GetNextSlotNum();
assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES);
}
#endif
call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL));
#ifdef DEBUG
if (verbose)
{
const fgArgInfo* argInfo = call->fgArgInfo;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, "
"outgoingArgSpaceSize=%d\n",
argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
else
{
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
#else
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
#endif
}
#endif
}
#endif // FEATURE_FIXED_OUT_ARGS
// Clear the ASG and EXCEPT (if possible) flags on the call node
call->gtFlags &= ~GTF_ASG;
if (!call->OperMayThrow(this))
{
call->gtFlags &= ~GTF_EXCEPT;
}
// Union in the side effect flags from the call's operands
call->gtFlags |= flagsSummary & GTF_ALL_EFFECT;
// If we are remorphing or don't have any register arguments or other arguments that need
// temps, then we don't need to call SortArgs() and EvalArgsToTemps().
//
if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps()))
{
// Do the 'defer or eval to temp' analysis.
call->fgArgInfo->SortArgs();
call->fgArgInfo->EvalArgsToTemps();
}
if (hasMultiregStructArgs)
{
fgMorphMultiregStructArgs(call);
}
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
return call;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
// call fgMorphMultiregStructArg on each of them.
//
// Arguments:
// call : a GenTreeCall node that has one or more TYP_STRUCT arguments.
//
// Notes:
// We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types.
// It will ensure that the struct arguments are in the correct form.
// If this method fails to find any TYP_STRUCT arguments it will assert.
//
void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
{
bool foundStructArg = false;
GenTreeFlags flagsSummary = GTF_EMPTY;
#ifdef TARGET_X86
assert(!"Logic error: no MultiregStructArgs for X86");
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI");
#endif
for (GenTreeCall::Use& use : call->Args())
{
// For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
// For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
// The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
// between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself,
// otherwise points to the list in the late args list.
bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode());
assert(fgEntryPtr != nullptr);
GenTree* argx = fgEntryPtr->GetNode();
GenTreeCall::Use* lateUse = nullptr;
GenTree* lateNode = nullptr;
if (isLateArg)
{
for (GenTreeCall::Use& lateArgUse : call->LateArgs())
{
GenTree* argNode = lateArgUse.GetNode();
if (argx == argNode)
{
lateUse = &lateArgUse;
lateNode = argNode;
break;
}
}
assert((lateUse != nullptr) && (lateNode != nullptr));
}
if (!fgEntryPtr->isStruct)
{
continue;
}
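// The total number of registers plus stack slots used to pass this argument.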
unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber());
if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT))
{
foundStructArg = true;
if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST))
{
if (fgEntryPtr->IsHfaRegArg())
{
var_types hfaType = fgEntryPtr->GetHfaType();
unsigned structSize;
if (argx->OperIs(GT_OBJ))
{
structSize = argx->AsObj()->GetLayout()->GetSize();
}
else if (varTypeIsSIMD(argx))
{
structSize = genTypeSize(argx);
}
else
{
assert(argx->OperIs(GT_LCL_VAR));
structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize;
}
assert(structSize > 0);
if (structSize == genTypeSize(hfaType))
{
if (argx->OperIs(GT_OBJ))
{
argx->SetOper(GT_IND);
}
argx->gtType = hfaType;
}
}
GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr);
// Did we replace 'argx' with a new tree?
if (newArgx != argx)
{
// link the new arg node into either the late arg list or the gtCallArgs list
if (isLateArg)
{
lateUse->SetNode(newArgx);
}
else
{
use.SetNode(newArgx);
}
assert(fgEntryPtr->GetNode() == newArgx);
}
}
}
}
// We should only call this method when we actually have one or more multireg struct args
assert(foundStructArg);
// Update the flags
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list,
// morph the argument as needed to be passed correctly.
//
// Arguments:
// arg - A GenTree node containing a TYP_STRUCT arg
// fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
// The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
// If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
// stack are marked as doNotEnregister, and then we return.
//
// If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
// which is only used for struct arguments.
//
// If arg is a LclVar, we check whether it is struct promoted, has the right number of fields,
// and has them at the appropriate offsets; if so, we will use the struct promoted fields
// in the GT_FIELD_LIST nodes that we create.
// If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements,
// we will use a set of GT_LCL_FLD nodes to access the various portions of the struct;
// this also forces the struct to be stack allocated into the local frame.
// For the GT_OBJ case, we will clone the address expression and generate two (or more)
// indirections.
// Currently the implementation handles ARM64/ARM and will NYI for other architectures.
//
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
assert(varTypeIsStruct(arg->TypeGet()));
#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI)
NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif
#ifdef TARGET_ARM
if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
(!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
{
GenTreeLclVarCommon* lcl = nullptr;
GenTree* actualArg = arg->gtEffectiveVal();
if (actualArg->OperGet() == GT_OBJ)
{
if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
{
lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
}
}
else if (actualArg->OperGet() == GT_LCL_VAR)
{
lcl = actualArg->AsLclVarCommon();
}
if (lcl != nullptr)
{
if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
{
arg = fgMorphLclArgToFieldlist(lcl);
}
else if (arg->TypeGet() == TYP_STRUCT)
{
// If this is a non-register struct, it must be referenced from memory.
if (!actualArg->OperIs(GT_OBJ))
{
// Create an Obj of the temp to use it as a call argument.
arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
}
// Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg));
}
}
return arg;
}
#if FEATURE_MULTIREG_ARGS
// Examine 'arg' and setup argValue objClass and structSize
//
const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg);
GenTree* argValue = arg; // normally argValue will be arg, but see right below
unsigned structSize = 0;
if (arg->TypeGet() != TYP_STRUCT)
{
structSize = genTypeSize(arg->TypeGet());
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else if (arg->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = arg->AsObj();
const ClassLayout* objLayout = argObj->GetLayout();
structSize = objLayout->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
// If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR.
GenTree* op1 = argObj->gtOp1;
if (op1->OperGet() == GT_ADDR)
{
GenTree* underlyingTree = op1->AsOp()->gtOp1;
// Only update to the same type.
if (underlyingTree->OperIs(GT_LCL_VAR))
{
const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar());
if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout))
{
argValue = underlyingTree;
}
}
}
}
else if (arg->OperGet() == GT_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon());
structSize = varDsc->lvExactSize;
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else
{
structSize = info.compCompHnd->getClassSize(objClass);
}
var_types hfaType = TYP_UNDEF;
var_types elemType = TYP_UNDEF;
unsigned elemCount = 0;
unsigned elemSize = 0;
var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0
hfaType = fgEntryPtr->GetHfaType();
if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters())
{
elemType = hfaType;
elemSize = genTypeSize(elemType);
elemCount = structSize / elemSize;
assert(elemSize * elemCount == structSize);
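// For example, an HFA of four floats gives elemType = TYP_FLOAT, elemSize = 4 and elemCount = 4.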
for (unsigned inx = 0; inx < elemCount; inx++)
{
type[inx] = elemType;
}
}
else
{
assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE);
BYTE gcPtrs[MAX_ARG_REG_COUNT];
elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
for (unsigned inx = 0; inx < elemCount; inx++)
{
#ifdef UNIX_AMD64_ABI
if (gcPtrs[inx] == TYPE_GC_NONE)
{
type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx],
fgEntryPtr->structDesc.eightByteSizes[inx]);
}
else
#endif // UNIX_AMD64_ABI
{
type[inx] = getJitGCType(gcPtrs[inx]);
}
}
#ifndef UNIX_AMD64_ABI
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
elemSize = TARGET_POINTER_SIZE;
// We can safely widen this to aligned bytes since we are loading from
// a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and
// lives in the stack frame or will be a promoted field.
//
structSize = elemCount * TARGET_POINTER_SIZE;
}
else // we must have a GT_OBJ
{
assert(argValue->OperGet() == GT_OBJ);
// We need to load the struct from an arbitrary address
// and we can't read past the end of the struct (structSize bytes),
// so we adjust the last load type here
//
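// For example, on ARM64 a 12-byte struct gives elemCount = 2, with the last
// load narrowed to TYP_INT so that only 12 bytes are read.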
unsigned remainingBytes = structSize % TARGET_POINTER_SIZE;
unsigned lastElem = elemCount - 1;
if (remainingBytes != 0)
{
switch (remainingBytes)
{
case 1:
type[lastElem] = TYP_BYTE;
break;
case 2:
type[lastElem] = TYP_SHORT;
break;
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
case 4:
type[lastElem] = TYP_INT;
break;
#endif // (TARGET_ARM64) || (UNIX_AMD64_ABI)
default:
noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
break;
}
}
}
#endif // !UNIX_AMD64_ABI
}
// We should still have a TYP_STRUCT
assert(varTypeIsStruct(argValue->TypeGet()));
GenTreeFieldList* newArg = nullptr;
// Are we passing a struct LclVar?
//
if (argValue->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
// At this point any TYP_STRUCT LclVar must be an aligned struct
// or an HFA struct, both of which are passed by value.
//
assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa());
varDsc->lvIsMultiRegArg = true;
#ifdef DEBUG
if (verbose)
{
JITDUMP("Multireg struct argument V%02u : ", varNum);
fgEntryPtr->Dump();
}
#endif // DEBUG
#ifndef UNIX_AMD64_ABI
// This local variable must match the layout of the 'objClass' type exactly
if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
// We have a HFA struct.
noway_assert(elemType == varDsc->GetHfaType());
noway_assert(elemSize == genTypeSize(elemType));
noway_assert(elemCount == (varDsc->lvExactSize / elemSize));
noway_assert(elemSize * elemCount == varDsc->lvExactSize);
for (unsigned inx = 0; (inx < elemCount); inx++)
{
noway_assert(type[inx] == elemType);
}
}
else
{
#if defined(TARGET_ARM64)
// We must have a 16-byte struct (non-HFA)
noway_assert(elemCount == 2);
#elif defined(TARGET_ARM)
noway_assert(elemCount <= 4);
#endif
for (unsigned inx = 0; inx < elemCount; inx++)
{
var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx);
// We set up the type[inx] value above using the GC info from 'objClass'
// This GT_LCL_VAR must have the same GC layout info
//
if (varTypeIsGC(currentGcLayoutType))
{
noway_assert(type[inx] == currentGcLayoutType);
}
else
{
// We may have used a small type when we set up the type[inx] values above.
// We can safely widen this to TYP_I_IMPL.
type[inx] = TYP_I_IMPL;
}
}
}
if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
bool canMorphToFieldList = true;
for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize)
{
const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset);
if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum)))
{
canMorphToFieldList = false;
break;
}
}
if (canMorphToFieldList)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
else
#endif // !UNIX_AMD64_ABI
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
// Is this LclVar a promoted struct with exactly 2 fields?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa())
{
// See if we have two promoted fields that start at offset 0 and 8?
unsigned loVarNum = lvaGetFieldLocal(varDsc, 0);
unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE);
// Did we find the promoted fields at the necessary offsets?
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM))
{
LclVarDsc* loVarDsc = lvaGetDesc(loVarNum);
LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum);
var_types loType = loVarDsc->lvType;
var_types hiType = hiVarDsc->lvType;
if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) ||
(varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1))))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
}
else
{
// We can use the struct promoted field as the two arguments
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr))
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType);
newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLD nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#elif defined(TARGET_ARM)
// Is this LclVar a promoted struct with exactly same size?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa())
{
// See if we have promoted fields?
unsigned varNums[4];
bool hasBadVarNum = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx);
if (varNums[inx] == BAD_VAR_NUM)
{
hasBadVarNum = true;
break;
}
}
// Did we find the promoted fields at the necessary offsets?
if (!hasBadVarNum)
{
LclVarDsc* varDscs[4];
var_types varType[4];
bool varIsFloat = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varDscs[inx] = lvaGetDesc(varNums[inx]);
varType[inx] = varDscs[inx]->lvType;
if (varTypeIsFloating(varType[inx]))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the
// integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
varIsFloat = true;
break;
}
}
if (!varIsFloat)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLD nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#endif // TARGET_ARM
}
// If we didn't set newArg to a new GT_FIELD_LIST tree
//
if (newArg == nullptr)
{
if (fgEntryPtr->GetRegNum() == REG_STK)
{
// We leave this stack passed argument alone
return arg;
}
// Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted)?
// A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it.
//
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
unsigned baseOffset = varNode->GetLclOffs();
unsigned lastOffset = baseOffset + structSize;
// The allocated size of our LocalVar must be at least as big as lastOffset
assert(varDsc->lvSize() >= lastOffset);
if (varDsc->HasGCPtr())
{
// alignment of the baseOffset is required
noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0);
#ifndef UNIX_AMD64_ABI
noway_assert(elemSize == TARGET_POINTER_SIZE);
#endif
unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
ClassLayout* layout = varDsc->GetLayout();
for (unsigned inx = 0; (inx < elemCount); inx++)
{
// The GC information must match what we setup using 'objClass'
if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx]))
{
noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx));
}
}
}
else // this varDsc contains no GC pointers
{
for (unsigned inx = 0; inx < elemCount; inx++)
{
// The GC information must match what we setup using 'objClass'
noway_assert(!varTypeIsGC(type[inx]));
}
}
//
// We create a list of GT_LCL_FLD nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI)
//
unsigned offset = baseOffset;
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset);
newArg->AddField(this, nextLclFld, offset, type[inx]);
offset += genTypeSize(type[inx]);
}
}
// Are we passing a GT_OBJ struct?
//
else if (argValue->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argValue->AsObj();
GenTree* baseAddr = argObj->gtOp1;
var_types addrType = baseAddr->TypeGet();
if (baseAddr->OperGet() == GT_ADDR)
{
GenTree* addrTaken = baseAddr->AsOp()->gtOp1;
if (addrTaken->IsLocal())
{
GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
// We access a non-struct type (for example, long) as a struct type.
// Make sure lclVar lives on stack to make sure its fields are accessible by address.
lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
// Create a new tree for 'arg'
// replace the existing LDOBJ(EXPR)
// with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...)
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
unsigned offset = 0;
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* curAddr = baseAddr;
if (offset != 0)
{
GenTree* baseAddrDup = gtCloneExpr(baseAddr);
noway_assert(baseAddrDup != nullptr);
curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL));
}
else
{
curAddr = baseAddr;
}
GenTree* curItem = gtNewIndir(type[inx], curAddr);
// For safety all GT_IND should have at least GT_GLOB_REF set.
curItem->gtFlags |= GTF_GLOB_REF;
newArg->AddField(this, curItem, offset, type[inx]);
offset += genTypeSize(type[inx]);
}
}
}
#ifdef DEBUG
// If we reach here we should have set newArg to something
if (newArg == nullptr)
{
gtDispTree(argValue);
assert(!"Missing case in fgMorphMultiregStructArg");
}
#endif
noway_assert(newArg != nullptr);
#ifdef DEBUG
if (verbose)
{
printf("fgMorphMultiregStructArg created tree:\n");
gtDispTree(newArg);
}
#endif
arg = newArg; // consider calling fgMorphTree(newArg);
#endif // FEATURE_MULTIREG_ARGS
return arg;
}
//------------------------------------------------------------------------
// fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields
//
// Arguments:
// lcl - The GT_LCL_VAR node we will transform
//
// Return value:
// The new GT_FIELD_LIST that we have created.
//
GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl)
{
LclVarDsc* varDsc = lvaGetDesc(lcl);
assert(varDsc->lvPromoted);
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclNum = varDsc->lvFieldLclStart;
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
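// For example, a local promoted into a float field at offset 0 and a float field at offset 4
// becomes FIELD_LIST(LCL_VAR<fld0> at offset 0, LCL_VAR<fld1> at offset 4).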
for (unsigned i = 0; i < fieldCount; i++)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
fieldLclNum++;
}
return fieldList;
}
//------------------------------------------------------------------------
// fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary,
// to pass to a callee.
//
// Arguments:
// call - call being processed
// args - args for the call
// copyBlkClass - class handle for the struct
//
// The arg is updated if necessary with the copy.
//
void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass)
{
GenTree* argx = args->GetNode();
noway_assert(argx->gtOper != GT_MKREFANY);
fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx);
// If we're optimizing, see if we can avoid making a copy.
//
// We don't need a copy if this is the last use of an implicit by-ref local.
//
if (opts.OptimizationEnabled())
{
GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
const unsigned varNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(varNum);
const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
// We don't have liveness so we rely on other indications of last use.
//
// We handle these cases:
//
// * (must not copy) If the call is a tail call, the use is a last use.
// We must skip the copy if we have a fast tail call.
//
// * (may not copy) if the call is noreturn, the use is a last use.
// We also check for just one reference here as we are not doing
// alias analysis of the call's parameters, or checking if the call
// site is not within some try region.
//
// * (may not copy) if there is exactly one use of the local in the method,
// and the call is not in loop, this is a last use.
//
// fgMightHaveLoop() is expensive; check it last, only if necessary.
//
if (call->IsTailCall() || //
((totalAppearances == 1) && call->IsNoReturn()) || //
((totalAppearances == 1) && !fgMightHaveLoop()))
{
args->SetNode(lcl);
assert(argEntry->GetNode() == lcl);
JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum);
return;
}
}
}
JITDUMP("making an outgoing copy for struct arg\n");
if (fgOutgoingArgTemps == nullptr)
{
fgOutgoingArgTemps = hashBv::Create(this);
}
unsigned tmp = 0;
bool found = false;
// Attempt to find a local we have already used for an outgoing struct and reuse it.
// We do not reuse within a statement.
if (!opts.MinOpts())
{
indexType lclNum;
FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps)
{
LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum);
if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) &&
!fgCurrentlyInUseArgTemps->testBit(lclNum))
{
tmp = (unsigned)lclNum;
found = true;
JITDUMP("reusing outgoing struct arg");
break;
}
}
NEXT_HBV_BIT_SET;
}
// Create the CopyBlk tree and insert it.
if (!found)
{
// Get a new temp
// Here we don't need the unsafe value cls check, since the addr of this temp is used only in copyblk.
tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument"));
lvaSetStruct(tmp, copyBlkClass, false);
if (call->IsVarargs())
{
lvaSetStructUsedAsVarArg(tmp);
}
fgOutgoingArgTemps->setBit(tmp);
}
fgCurrentlyInUseArgTemps->setBit(tmp);
// TYP_SIMD structs should not be enregistered, since ABI requires it to be
// allocated on stack and address of it needs to be passed.
if (lclVarIsSIMDType(tmp))
{
// TODO: check if we need this block here or other parts already deal with it.
lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg));
}
// Create a reference to the temp
GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
// Copy the valuetype to the temp
GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */);
copyBlk = fgMorphCopyBlock(copyBlk);
#if FEATURE_FIXED_OUT_ARGS
// Do the copy early, and evaluate the temp later (see EvalArgsToTemps)
// When on Unix, create a LCL_FLD for structs passed in more than one register. See fgMakeTmpArgNode.
GenTree* arg = copyBlk;
#else // FEATURE_FIXED_OUT_ARGS
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression.
argEntry->tmpNum = tmp;
GenTree* arg = fgMakeTmpArgNode(argEntry);
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
#endif // FEATURE_FIXED_OUT_ARGS
args->SetNode(arg);
call->fgArgInfo->EvalToTmp(argEntry, tmp, arg);
}
#ifdef TARGET_ARM
// See declaration for specification comment.
void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
unsigned firstArgRegNum,
regMaskTP* pArgSkippedRegMask)
{
assert(varDsc->lvPromoted);
// There's no way to do these calculations without breaking abstraction and assuming that
// integer register arguments are consecutive ints. They are on ARM.
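// For example, if the struct starts in r0, one field ends in r1 and the next field
// starts in r3, then r2 is added to the skipped-register mask.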
// To start, figure out what register contains the last byte of the first argument.
LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
unsigned lastFldRegOfLastByte =
(firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
// Now we're keeping track of the register that the last field ended in; see what registers
// subsequent fields start in, and whether any are skipped.
// (We assume here the invariant that the fields are sorted in offset order.)
for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++)
{
unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset;
LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum);
unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields.
// This loop should enumerate the offsets of any registers skipped.
// Start at the first register after the one holding the last byte of the previous field;
// any register before the first register of the current field was skipped.
for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset;
skippedRegOffsets++)
{
// If the register number would not be an arg reg, we're done.
if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG)
return;
*pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets));
}
lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
}
}
#endif // TARGET_ARM
/*****************************************************************************
*
* A little helper used to rearrange nested commutative operations. The
* effect is that nested associative, commutative operations are transformed
* into a 'left-deep' tree, i.e. into something like this:
*
* (((a op b) op c) op d) op...
*/
#if REARRANGE_ADDS
void Compiler::fgMoveOpsLeft(GenTree* tree)
{
GenTree* op1;
GenTree* op2;
genTreeOps oper;
do
{
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
oper = tree->OperGet();
noway_assert(GenTree::OperIsCommutative(oper));
noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL);
noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder);
noway_assert(oper == op2->gtOper);
// Commutativity doesn't hold if overflow checks are needed
if (tree->gtOverflowEx() || op2->gtOverflowEx())
{
return;
}
if (gtIsActiveCSE_Candidate(op2))
{
// If we have marked op2 as a CSE candidate,
// we can't perform a commutative reordering
// because any value numbers that we computed for op2
// will be incorrect after performing a commutative reordering
//
return;
}
if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT))
{
return;
}
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators
if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0))
{
return;
}
if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN)
{
// We could deal with this, but we were always broken and just hit the assert
// below regarding flags, which means it's not frequent, so we will just bail out.
// See #195514
return;
}
noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx());
GenTree* ad1 = op2->AsOp()->gtOp1;
GenTree* ad2 = op2->AsOp()->gtOp2;
// Compiler::optOptimizeBools() can create a GT_OR of two GC pointers yielding a GT_INT.
// We cannot reorder such GT_OR trees.
//
if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet()))
{
break;
}
// Don't split up a byref calculation and create a new byref. E.g.,
// [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int).
// Doing this transformation could create a situation where the first
// addition (that is, [byref]+ (ref, int) ) creates a byref pointer that
// no longer points within the ref object. If a GC happens, the byref won't
// get updated. This can happen, for instance, if one of the int components
// is negative. It also requires the address generation be in a fully-interruptible
// code region.
//
if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL)
{
assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD));
break;
}
/* Change "(x op (y op z))" to "(x op y) op z" */
/* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */
GenTree* new_op1 = op2;
new_op1->AsOp()->gtOp1 = op1;
new_op1->AsOp()->gtOp2 = ad1;
/* Change the flags. */
// Make sure we aren't throwing away any flags
noway_assert((new_op1->gtFlags &
~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated
GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0);
new_op1->gtFlags =
(new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag.
(op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT);
/* Retype new_op1 if it has now become (or is no longer) a GC ptr. */
if (varTypeIsGC(op1->TypeGet()))
{
noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_ADD) || // byref(ref + (int+int))
(varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_OR)); // int(gcref | int(gcref|intval))
new_op1->gtType = tree->gtType;
}
else if (varTypeIsGC(ad2->TypeGet()))
{
// Neither ad1 nor op1 is GC, so new_op1 isn't either.
noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL);
new_op1->gtType = TYP_I_IMPL;
}
// If new_op1 is a new expression, assign it a new unique value number.
// vnStore is null before the ValueNumber phase has run
if (vnStore != nullptr)
{
// We can only keep the old value number on new_op1 if both op1 and ad2
// have the same non-NoVN value numbers. Since op is commutative, comparing
// only ad2 and op1 is enough.
if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal()))
{
new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet()));
}
}
tree->AsOp()->gtOp1 = new_op1;
tree->AsOp()->gtOp2 = ad2;
/* If 'new_op1' is now the same nested op, process it recursively */
if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
{
fgMoveOpsLeft(new_op1);
}
/* If 'ad2' is now the same nested op, process it
* Instead of recursion, we set up op1 and op2 for the next loop.
*/
op1 = new_op1;
op2 = ad2;
} while ((op2->gtOper == oper) && !op2->gtOverflowEx());
return;
}
#endif
/*****************************************************************************/
void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay)
{
if (tree->OperIs(GT_BOUNDS_CHECK))
{
GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk();
BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay);
if (failBlock != nullptr)
{
boundsChk->gtIndRngFailBB = failBlock;
}
}
else if (tree->OperIs(GT_INDEX_ADDR))
{
GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr();
BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
if (failBlock != nullptr)
{
indexAddr->gtIndRngFailBB = failBlock;
}
}
else
{
noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX));
fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
}
}
BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay)
{
if (opts.MinOpts())
{
delay = false;
}
if (!opts.compDbgCode)
{
if (!delay && !compIsForInlining())
{
// Create/find the appropriate "range-fail" label
return fgRngChkTarget(compCurBB, kind);
}
}
return nullptr;
}
/*****************************************************************************
*
* Expand a GT_INDEX node and fully morph the child operands
*
* The original GT_INDEX node is bashed into the GT_IND node that accesses
* the array element. We expand the GT_INDEX node into a larger tree that
* evaluates the array base and index. The simplest expansion is a GT_COMMA
* with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag.
* For complex array or index expressions one or more GT_COMMA assignments
* are inserted so that we only evaluate the array or index expressions once.
*
* The fully expanded tree is then morphed. This causes gtFoldExpr to
* perform local constant prop and reorder the constants in the tree and
* fold them.
*
* We then parse the resulting array element expression in order to locate
* and label the constants and variables that occur in the tree.
*/
const int MAX_ARR_COMPLEXITY = 4;
const int MAX_INDEX_COMPLEXITY = 4;
GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
{
noway_assert(tree->gtOper == GT_INDEX);
GenTreeIndex* asIndex = tree->AsIndex();
var_types elemTyp = asIndex->TypeGet();
unsigned elemSize = asIndex->gtIndElemSize;
CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass;
noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr);
// Fold "cns_str"[cns_index] to ushort constant
// NOTE: don't do it for empty string, the operation will fail anyway
if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) &&
!asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32())
{
const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue());
if (cnsIndex >= 0)
{
int length;
const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd,
asIndex->Arr()->AsStrCon()->gtSconCPX, &length);
if ((cnsIndex < length) && (str != nullptr))
{
GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT);
INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return cnsCharNode;
}
}
}
#ifdef FEATURE_SIMD
if (varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize))
{
// If this is a SIMD type, this is the point at which we lose the type information,
// so we need to set the correct type on the GT_IND.
// (We don't care about the base type here, so we only check, but don't retain, the return value).
unsigned simdElemSize = 0;
if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF)
{
assert(simdElemSize == elemSize);
elemTyp = getSIMDTypeForSize(elemSize);
// This is the new type of the node.
tree->gtType = elemTyp;
// Now set elemStructType to null so that we don't confuse value numbering.
elemStructType = nullptr;
}
}
#endif // FEATURE_SIMD
// Set up the array length's offset into lenOffs
// And the first element's offset into elemOffs
ssize_t lenOffs;
ssize_t elemOffs;
if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
{
lenOffs = OFFSETOF__CORINFO_String__stringLen;
elemOffs = OFFSETOF__CORINFO_String__chars;
tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
}
else
{
// We have a standard array
lenOffs = OFFSETOF__CORINFO_Array__length;
elemOffs = OFFSETOF__CORINFO_Array__data;
}
// In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts
// compilation time is roughly proportional to the size of the IR, this helps keep compilation times down.
// Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion
// performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in
// minopts).
//
// When we *are* optimizing, we fully expand GT_INDEX to:
// 1. Evaluate the array address expression and store the result in a temp if the expression is complex or
// side-effecting.
// 2. Evaluate the array index expression and store the result in a temp if the expression is complex or
// side-effecting.
// 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array))
// 4. Compute the address of the element that will be accessed:
// GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize))
// 5. Dereference the address with a GT_IND.
//
// This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows
// for more straightforward bounds-check removal, CSE, etc.
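// For example, when optimizing, "a[i]" for an int[] expands to roughly
// COMMA(BOUNDS_CHECK(i, ARR_LENGTH(a)), IND(ADD(a, ADD(MUL(i, 4), firstElemOffset)))),
// though the exact shape of the address computation varies by target (see below).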
if (opts.MinOpts())
{
GenTree* const array = fgMorphTree(asIndex->Arr());
GenTree* const index = fgMorphTree(asIndex->Index());
GenTreeIndexAddr* const indexAddr =
new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize,
static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs));
indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT;
// Mark the indirection node as needing a range check if necessary.
// Note this will always be true unless JitSkipArrayBoundCheck() is used
if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0)
{
fgSetRngChkTarget(indexAddr);
}
if (!tree->TypeIs(TYP_STRUCT))
{
tree->ChangeOper(GT_IND);
}
else
{
DEBUG_DESTROY_NODE(tree);
tree = gtNewObjNode(elemStructType, indexAddr);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
}
GenTreeIndir* const indir = tree->AsIndir();
indir->Addr() = indexAddr;
bool canCSE = indir->CanCSE();
indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT);
if (!canCSE)
{
indir->SetDoNotCSE();
}
INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return indir;
}
GenTree* arrRef = asIndex->Arr();
GenTree* index = asIndex->Index();
bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING
bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0);
GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression
GenTree* bndsChk = nullptr;
// If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address.
if (chkd)
{
GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression
GenTree* index2 = nullptr;
// If the arrRef or index expressions involves an assignment, a call, or reads from global memory,
// then we *must* allocate a temporary in which to "localize" those values, to ensure that the
// same values are used in the bounds check and the actual dereference.
// Also we allocate the temporary when the expression is sufficiently complex/expensive.
//
// Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is
// not exposed. Without that condition there are cases of local struct fields that were previously,
// needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that
// were mostly ameliorated by adding this condition.
//
// Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created
// after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is
// perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to
// do this here.
if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) ||
gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef);
arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
}
else
{
arrRef2 = gtCloneExpr(arrRef);
noway_assert(arrRef2 != nullptr);
}
if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) ||
index->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr"));
indexDefn = gtNewTempAssign(indexTmpNum, index);
index = gtNewLclvNode(indexTmpNum, index->TypeGet());
index2 = gtNewLclvNode(indexTmpNum, index->TypeGet());
}
else
{
index2 = gtCloneExpr(index);
noway_assert(index2 != nullptr);
}
// Next introduce a GT_BOUNDS_CHECK node
var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
// of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
// the comparison will have to be widened to 64 bits.
if (index->TypeGet() == TYP_I_IMPL)
{
bndsChkType = TYP_I_IMPL;
}
#endif // TARGET_64BIT
GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);
if (bndsChkType != TYP_INT)
{
arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
}
GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);
bndsChk = arrBndsChk;
// Now we'll switch to using the second copies for arrRef and index
// to compute the address expression
arrRef = arrRef2;
index = index2;
}
// Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
GenTree* addr;
#ifdef TARGET_64BIT
// Widen 'index' on 64-bit targets
if (index->TypeGet() != TYP_I_IMPL)
{
if (index->OperGet() == GT_CNS_INT)
{
index->gtType = TYP_I_IMPL;
}
else
{
index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
/* Scale the index value if necessary */
if (elemSize > 1)
{
GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);
// Fix 392756 WP7 Crossgen
//
// During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
// is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
// Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
//
size->gtFlags |= GTF_DONT_CSE;
/* Multiply by the array element size */
addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
}
else
{
addr = index;
}
// Be careful to only create the byref pointer when the full index expression is added to the array reference.
// We don't want to create a partial byref address expression that doesn't include the full index offset:
// a byref must point within the containing object. It is dangerous (especially when optimizations come into
// play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
// the partial byref will not point within the object, and thus not get updated correctly during a GC.
// This is mostly a risk in fully-interruptible code regions.
// We can generate two types of trees for "addr":
//
// 1) "arrRef + (index + elemOffset)"
// 2) "(arrRef + elemOffset) + index"
//
// XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
// while for Arm we'd better try to make an invariant sub-tree as large as possible, which is usually
// "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
// 2) should still be safe from GC's point of view since both ADD operations are byref and point to
// within the object so GC will be able to correctly track and update them.
bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
groupArrayRefWithElemOffset = true;
// TODO: in some cases even on ARM we'd better use the 1) shape, because if "index" is invariant and "arrRef" is not,
// we at least will be able to hoist/CSE "index + elemOffset" in some cases.
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497
// Use the 2) form only for primitive types for now - it significantly reduced the number of size regressions.
if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
{
groupArrayRefWithElemOffset = false;
}
#endif
// First element's offset
GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL);
if (groupArrayRefWithElemOffset)
{
GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr);
}
else
{
addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
}
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) ||
(GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL));
// Change the original GT_INDEX node into a GT_IND node
tree->SetOper(GT_IND);
// If the index node is a floating-point type, notify the compiler
// we'll potentially use floating point registers at the time of codegen.
if (varTypeUsesFloatReg(tree->gtType))
{
this->compFloatingPointUsed = true;
}
// We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node
// is no longer a GT_INDEX node.
tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT);
tree->AsOp()->gtOp1 = addr;
// This is an array index expression.
tree->gtFlags |= GTF_IND_ARR_INDEX;
// If there's a bounds check, the indir won't fault.
if (bndsChk || indexNonFaulting)
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
else
{
tree->gtFlags |= GTF_EXCEPT;
}
if (nCSE)
{
tree->gtFlags |= GTF_DONT_CSE;
}
// Store information about it.
GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType));
// Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it.
GenTree* indTree = tree;
// Did we create a bndsChk tree?
if (bndsChk)
{
// Use a GT_COMMA node to prepend the array bound check
//
tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree);
/* Mark the indirection node as needing a range check */
fgSetRngChkTarget(bndsChk);
}
if (indexDefn != nullptr)
{
// Use a GT_COMMA node to prepend the index assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree);
}
if (arrRefDefn != nullptr)
{
// Use a GT_COMMA node to prepend the arrRef assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
}
JITDUMP("fgMorphArrayIndex (before remorph):\n")
DISPTREE(tree)
// Currently we morph the tree to perform some folding operations prior
// to attaching fieldSeq info and labeling constant array index contributions
//
tree = fgMorphTree(tree);
JITDUMP("fgMorphArrayIndex (after remorph):\n")
DISPTREE(tree)
// Ideally we just want to proceed to attaching fieldSeq info and labeling the
// constant array index contributions, but the morphing operation may have changed
// the 'tree' into something that now unconditionally throws an exception.
//
// In such a case the gtEffectiveVal could be a new tree or its gtOper could be modified
// or it could be left unchanged. If it is unchanged then we should not return,
// instead we should proceed to attaching fieldSeq info, etc...
//
GenTree* arrElem = tree->gtEffectiveVal();
if (fgIsCommaThrow(tree))
{
if ((arrElem != indTree) || // A new tree node may have been created
(!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT
{
return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
}
}
assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED));
DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED)
addr = arrElem->gtGetOp1();
GenTree* cnsOff = nullptr;
if (addr->OperIs(GT_ADD))
{
GenTree* addrOp1 = addr->gtGetOp1();
if (groupArrayRefWithElemOffset)
{
if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI())
{
assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF));
cnsOff = addrOp1->gtGetOp2();
addr = addr->gtGetOp2();
// Label any constant array index contributions with #ConstantIndex and any LclVars with
// GTF_VAR_ARR_INDEX
addr->LabelIndex(this);
}
else
{
assert(addr->gtGetOp2()->IsCnsIntOrI());
cnsOff = addr->gtGetOp2();
addr = nullptr;
}
}
else
{
assert(addr->TypeIs(TYP_BYREF));
assert(addr->gtGetOp1()->TypeIs(TYP_REF));
addr = addr->gtGetOp2();
// Look for the constant [#FirstElem] node here, or as the RHS of an ADD.
if (addr->IsCnsIntOrI())
{
cnsOff = addr;
addr = nullptr;
}
else
{
if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI())
{
cnsOff = addr->gtGetOp2();
addr = addr->gtGetOp1();
}
// Label any constant array index contributions with #ConstantIndex and any LclVars with
// GTF_VAR_ARR_INDEX
addr->LabelIndex(this);
}
}
}
else if (addr->IsCnsIntOrI())
{
cnsOff = addr;
}
FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs))
{
// Assign it the [#FirstElem] field sequence
//
cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq;
}
else // We have folded the first element's offset with the index expression
{
// Build the [#ConstantIndex, #FirstElem] field sequence
//
FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq);
if (cnsOff == nullptr) // It must have folded into a zero offset
{
// Record in the general zero-offset map.
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
else
{
cnsOff->AsIntCon()->gtFieldSeq = fieldSeq;
}
}
return tree;
}
#ifdef TARGET_X86
/*****************************************************************************
*
* Wrap fixed stack arguments for varargs functions to go through varargs
* cookie to access them, except for the cookie itself.
*
* Non-x86 platforms are allowed to access all arguments directly
* so we don't need this code.
*
*/
GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
{
/* For the fixed stack arguments of a varargs function, we need to go
through the varargs cookie to access them, except for the
cookie itself */
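// That is, the fixed stack argument is accessed as *(varargsBaseOfStkArgs - offset)
// rather than directly from its own frame slot.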
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg)
{
// Create a node representing the local pointing to the base of the args
GenTree* ptrArg =
gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
gtNewIconNode(varDsc->GetStackOffset() -
codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs));
// Access the argument through the local
GenTree* tree;
if (varTypeIsStruct(varType))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
tree = gtNewObjNode(typeHnd, ptrArg);
}
else
{
tree = gtNewOperNode(GT_IND, varType, ptrArg);
}
tree->gtFlags |= GTF_IND_TGTANYWHERE;
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
return fgMorphTree(tree);
}
return NULL;
}
#endif
/*****************************************************************************
*
* Transform the given GT_LCL_VAR tree for code generation.
*/
GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph)
{
assert(tree->gtOper == GT_LCL_VAR);
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
var_types varType = lvaGetRealType(lclNum);
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0);
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
/* If not during the global morphing phase bail */
if (!fgGlobalMorph && !forceRemorph)
{
return tree;
}
bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0;
noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr
if (!varAddr && varDsc->lvNormalizeOnLoad())
{
// TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL.
// Now it does, but this leads to some regressions because we lose the uniform VNs for trees
// that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created
// here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)).
// This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer.
// This quirk preserves the previous behavior.
// TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk.
bool isBoolQuirk = varType == TYP_BOOL;
// Assertion prop can tell us to omit adding a cast here. This is
// useful when the local is a small-typed parameter that is passed in a
// register: in that case, the ABI specifies that the upper bits might
// be invalid, but the assertion guarantees us that we have normalized
// when we wrote it.
if (optLocalAssertionProp && !isBoolQuirk &&
optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX)
{
// The previous assertion can guarantee us that if this node gets
// assigned a register, it will be normalized already. It is still
// possible that this node ends up being in memory, in which case
// normalization will still be needed, so we better have the right
// type.
assert(tree->TypeGet() == varDsc->TypeGet());
return tree;
}
// Small-typed arguments and aliased locals are normalized on load.
// Other small-typed locals are normalized on store.
// Also, all small-typed locals are normalized on load under the debugger, as the debugger could write to the variable.
// If this is one of the former, insert a narrowing cast on the load.
// ie. Convert: var-short --> cast-short(var-int)
tree->gtType = TYP_INT;
fgMorphTreeDone(tree);
tree = gtNewCastNode(TYP_INT, tree, false, varType);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
/*****************************************************************************
Grab a temp for big offset morphing.
This method will grab a new temp if no temp of this "type" has been created.
Or it will return the same cached one if it has been created.
*/
unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
{
unsigned lclNum = fgBigOffsetMorphingTemps[type];
if (lclNum == BAD_VAR_NUM)
{
// We haven't created a temp for this kind of type. Create one now.
lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
fgBigOffsetMorphingTemps[type] = lclNum;
}
else
{
// We'd better get the right type.
noway_assert(lvaTable[lclNum].TypeGet() == type);
}
noway_assert(lclNum != BAD_VAR_NUM);
return lclNum;
}
/*****************************************************************************
*
* Transform the given GT_FIELD tree for code generation.
*/
GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac)
{
assert(tree->gtOper == GT_FIELD);
CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd;
unsigned fldOffset = tree->AsField()->gtFldOffset;
GenTree* objRef = tree->AsField()->GetFldObj();
bool objIsLocal = false;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField();
if (!tree->AsField()->gtFldMayOverlap)
{
if (objRef != nullptr)
{
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::Instance);
}
else
{
// Only simple statics get imported as GT_FIELDs.
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::SimpleStatic);
}
}
// Reset the flag because we may reuse the node.
tree->AsField()->gtFldMayOverlap = false;
if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR))
{
// Make sure we've checked if 'objRef' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs may change it to a different opcode, which the
// simd field rewrites are sensitive to.
fgMorphImplicitByRefArgs(objRef);
}
noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) ||
((tree->gtFlags & GTF_GLOB_REF) != 0));
#ifdef FEATURE_SIMD
// if this field belongs to simd struct, translate it to simd intrinsic.
if (mac == nullptr)
{
if (IsBaselineSimdIsaSupported())
{
GenTree* newTree = fgMorphFieldToSimdGetElement(tree);
if (newTree != tree)
{
newTree = fgMorphTree(newTree);
return newTree;
}
}
}
else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1()))
{
GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr();
if (lcl != nullptr)
{
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
#endif
// Create a default MorphAddrContext early so it doesn't go out of scope
// before it is used.
MorphAddrContext defMAC(MACK_Ind);
/* Is this an instance data member? */
if (objRef)
{
GenTree* addr;
objIsLocal = objRef->IsLocal();
if (tree->gtFlags & GTF_IND_TLS_REF)
{
NO_WAY("instance field can not be a TLS ref.");
}
/* We'll create the expression "*(objRef + mem_offs)" */
noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL);
/*
Now we have a tree like this:
+--------------------+
| GT_FIELD | tree
+----------+---------+
|
+--------------+-------------+
|tree->AsField()->GetFldObj()|
+--------------+-------------+
We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+---------+----------+
|
|
+---------+----------+
| GT_ADD | addr
+---------+----------+
|
/ \
/ \
/ \
+-------------------+ +----------------------+
| objRef | | fldOffset |
| | | (when fldOffset !=0) |
+-------------------+ +----------------------+
or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+----------+---------+
|
+----------+---------+
| GT_COMMA | comma2
+----------+---------+
|
/ \
/ \
/ \
/ \
+---------+----------+ +---------+----------+
comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr
+---------+----------+ +---------+----------+
| |
/ \ / \
/ \ / \
/ \ / \
+-----+-----+ +-----+-----+ +---------+ +-----------+
asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset |
+-----+-----+ +-----+-----+ +---------+ +-----------+
| |
/ \ |
/ \ |
/ \ |
+-----+-----+ +-----+-----+ +-----------+
| tmpLcl | | objRef | | tmpLcl |
+-----------+ +-----------+ +-----------+
*/
var_types objRefType = objRef->TypeGet();
GenTree* comma = nullptr;
// NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field,
// and thus is equivalent to a MACK_Ind with zero offset.
if (mac == nullptr)
{
mac = &defMAC;
}
// This flag is set to enable the "conservative" style of explicit null-check insertion.
// This means that we insert an explicit null check whenever we create byref by adding a
// constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately
// dereferenced). The alternative is "aggressive", which would not insert such checks (for
// small offsets); in this plan, we would transfer some null-checking responsibility to
// callees of methods taking byref parameters. They would have to add explicit null checks
// when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in
// contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too
// large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null
// checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs.
// This is left here to point out how to implement it.
CLANG_FORMAT_COMMENT_ANCHOR;
#define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1
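// Illustrative example (hypothetical C# shape, not from any particular test):
//
//     ref int r = ref obj.fld;   // byref created from a possibly-null 'obj'
//     ...                        // 'r' is not dereferenced here
//
// Under the conservative scheme we insert an explicit null check on 'obj' at the
// point the byref is created, rather than relying on a later dereference to fault.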
bool addExplicitNullCheck = false;
// Implicit byref locals and string literals are never null.
if (fgAddrCouldBeNull(objRef))
{
// If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression
// whose address is being taken is either a local or static variable, whose address is necessarily
// non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind))
{
if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset))
{
addExplicitNullCheck = true;
}
else
{
// In R2R mode the field offset for some fields may change when the code
// is loaded. So we can't rely on a zero offset here to suppress the null check.
//
// See GitHub issue #16454.
bool fieldHasChangeableOffset = false;
#ifdef FEATURE_READYTORUN
fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr);
#endif
#if CONSERVATIVE_NULL_CHECK_BYREF_CREATION
addExplicitNullCheck = (mac->m_kind == MACK_Addr) &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset);
#else
addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset));
#endif
}
}
}
if (addExplicitNullCheck)
{
#ifdef DEBUG
if (verbose)
{
printf("Before explicit null check morphing:\n");
gtDispTree(tree);
}
#endif
//
// Create the "comma" subtree
//
GenTree* asg = nullptr;
GenTree* nullchk;
unsigned lclNum;
if (objRef->gtOper != GT_LCL_VAR)
{
lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet()));
// Create the "asg" node
asg = gtNewTempAssign(lclNum, objRef);
}
else
{
lclNum = objRef->AsLclVarCommon()->GetLclNum();
}
GenTree* lclVar = gtNewLclvNode(lclNum, objRefType);
nullchk = gtNewNullCheck(lclVar, compCurBB);
nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
if (asg)
{
// Create the "comma" node.
comma = gtNewOperNode(GT_COMMA,
TYP_VOID, // We don't want to return anything from this "comma" node.
// Set the type to TYP_VOID, so we can select "cmp" instruction
// instead of "mov" instruction later on.
asg, nullchk);
}
else
{
comma = nullchk;
}
addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
}
else
{
addr = objRef;
}
#ifdef FEATURE_READYTORUN
if (tree->AsField()->gtFieldLookup.addr != nullptr)
{
GenTree* offsetNode = nullptr;
if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE)
{
offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr,
GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd;
#endif
}
else
{
noway_assert(!"unexpected accessType for R2R field access");
}
var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF;
addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode);
}
#endif
if (fldOffset != 0)
{
// Generate the "addr" node.
// Add the member offset to the object's address.
addr = gtNewOperNode(GT_ADD, (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF, addr,
gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
}
// Now let's set the "tree" as a GT_IND tree.
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
tree->SetIndirExceptionFlags(this);
if (addExplicitNullCheck)
{
//
// Create "comma2" node and link it to "tree".
//
GenTree* comma2;
comma2 = gtNewOperNode(GT_COMMA,
addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
comma, addr);
tree->AsOp()->gtOp1 = comma2;
}
#ifdef DEBUG
if (verbose)
{
if (addExplicitNullCheck)
{
printf("After adding explicit null check:\n");
gtDispTree(tree);
}
}
#endif
}
else /* This is a static data member */
{
if (tree->gtFlags & GTF_IND_TLS_REF)
{
// Thread Local Storage static field reference
//
// Field ref is a TLS 'Thread-Local-Storage' reference
//
// Build this tree: IND(*) #
// |
// ADD(I_IMPL)
// / \.
// / CNS(fldOffset)
// /
// /
// /
// IND(I_IMPL) == [Base of this DLL's TLS]
// |
// ADD(I_IMPL)
// / \.
// / CNS(IdValue*4) or MUL
// / / \.
// IND(I_IMPL) / CNS(4)
// | /
// CNS(TLS_HDL,0x2C) IND
// |
// CNS(pIdAddr)
//
// # Denotes the original node
//
void** pIdAddr = nullptr;
unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);
//
// If we can access the TLS DLL index ID value directly
// then pIdAddr will be NULL and
// IdValue will be the actual TLS DLL index ID
//
GenTree* dllRef = nullptr;
if (pIdAddr == nullptr)
{
if (IdValue != 0)
{
dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
}
}
else
{
dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true);
// Next we multiply by 4
dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
}
#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides
// Mark this ICON as a TLS_HDL, codegen will use FS:[cns]
GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS
if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
tlsRef->gtFlags |= GTF_ICON_INITCLASS;
}
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (dllRef != nullptr)
{
/* Add the dllRef */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
}
/* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (fldOffset != 0)
{
GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
/* Add the TLS static field offset to the address */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
}
// Final indirect to get to actual value of TLS static field
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = tlsRef;
noway_assert(tree->gtFlags & GTF_IND_TLS_REF);
}
else
{
// Normal static field reference
//
// If we can access the static's address directly
// then pFldAddr will be NULL and
// fldAddr will be the actual address of the static field
//
void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);
// We should always be able to access this static field address directly
//
assert(pFldAddr == nullptr);
// For boxed statics, this direct address will be for the box. We have already added
// the indirection for the field itself and attached the sequence, in importation.
bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd);
if (isBoxedStatic)
{
fieldSeq = FieldSeqStore::NotAField();
}
// TODO-CQ: enable this optimization for 32 bit targets.
bool isStaticReadOnlyInited = false;
#ifdef TARGET_64BIT
if (tree->TypeIs(TYP_REF) && !isBoxedStatic)
{
bool pIsSpeculative = true;
if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE)
{
isStaticReadOnlyInited = !pIsSpeculative;
}
}
#endif // TARGET_64BIT
// TODO: choices made below have mostly historical reasons and
// should be unified to always use the IND(<address>) form.
CLANG_FORMAT_COMMENT_ANCHOR;
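// Summary of the code below: on 64-bit targets (and for boxed statics on 32-bit
// targets) we emit IND(CNS_INT<static address>); the remaining 32-bit cases still
// use the legacy GT_CLS_VAR form.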
#ifdef TARGET_64BIT
bool preferIndir = true;
#else // !TARGET_64BIT
bool preferIndir = isBoxedStatic;
#endif // !TARGET_64BIT
if (preferIndir)
{
GenTreeFlags handleKind = GTF_EMPTY;
if (isBoxedStatic)
{
handleKind = GTF_ICON_STATIC_BOX_PTR;
}
else if (isStaticReadOnlyInited)
{
handleKind = GTF_ICON_CONST_PTR;
}
else
{
handleKind = GTF_ICON_STATIC_HDL;
}
GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fieldSeq);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to.
if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
addr->gtFlags |= GTF_ICON_INITCLASS;
}
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
if (isBoxedStatic)
{
// The box for the static cannot be null, and is logically invariant, since it
// represents (a base for) the static's address.
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
else if (isStaticReadOnlyInited)
{
JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd));
// Static readonly field is not null at this point (see getStaticFieldCurrentClass impl).
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
return fgMorphSmpOp(tree);
}
else
{
// Only volatile or classinit could be set, and they map over
noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0);
static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE);
static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS);
tree->SetOper(GT_CLS_VAR);
tree->AsClsVar()->gtClsVarHnd = symHnd;
tree->AsClsVar()->gtFieldSeq = fieldSeq;
}
return tree;
}
}
noway_assert(tree->gtOper == GT_IND);
if (fldOffset == 0)
{
GenTree* addr = tree->AsOp()->gtOp1;
// 'addr' may be a GT_COMMA. Skip over any comma nodes
addr = addr->gtEffectiveVal();
#ifdef DEBUG
if (verbose)
{
printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n");
gtDispTree(tree);
}
#endif
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
// Pass down the current mac; if non null we are computing an address
GenTree* result = fgMorphSmpOp(tree, mac);
#ifdef DEBUG
if (verbose)
{
printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
gtDispTree(result);
}
#endif
return result;
}
//------------------------------------------------------------------------------
// fgMorphCallInline: attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// inlineResult - result tracking and reporting
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
{
bool inliningFailed = false;
// Is this call an inline candidate?
if (call->IsInlineCandidate())
{
InlineContext* createdContext = nullptr;
// Attempt the inline
fgMorphCallInlineHelper(call, inlineResult, &createdContext);
// We should have made up our minds one way or another....
assert(inlineResult->IsDecided());
// If we failed to inline, we have a bit of work to do to cleanup
if (inlineResult->IsFailure())
{
if (createdContext != nullptr)
{
// We created a context before we got to the failure, so mark
// it as failed in the tree.
createdContext->SetFailed(inlineResult);
}
else
{
#ifdef DEBUG
// In debug we always put all inline attempts into the inline tree.
InlineContext* ctx =
m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call);
ctx->SetFailed(inlineResult);
#endif
}
inliningFailed = true;
// Clear the Inline Candidate flag so we can ensure later we tried
// inlining all candidates.
//
call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE;
}
}
else
{
// This wasn't an inline candidate. So it must be a GDV candidate.
assert(call->IsGuardedDevirtualizationCandidate());
// We already know we can't inline this call, so don't even bother to try.
inliningFailed = true;
}
// If we failed to inline (or didn't even try), do some cleanup.
if (inliningFailed)
{
if (call->gtReturnType != TYP_VOID)
{
JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID());
// Detach the GT_CALL tree from the original statement by
// hanging a "nothing" node to it. Later the "nothing" node will be removed
// and the original GT_CALL tree will be picked up by the GT_RET_EXPR node.
noway_assert(fgMorphStmt->GetRootNode() == call);
fgMorphStmt->SetRootNode(gtNewNothingNode());
}
}
}
//------------------------------------------------------------------------------
// fgMorphCallInlineHelper: Helper to attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// result - result to set to success or failure
// createdContext - The context that was created if the inline attempt got to the inliner.
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
//
// If a context was created because we got to the importer then it is output by this function.
// If the inline succeeded, this context will already be marked as successful. If it failed and
// a context is returned, then it will not have been marked as success or failed.
void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext)
{
// Don't expect any surprises here.
assert(result->IsCandidate());
if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING)
{
// For now, attributing this to call site, though it's really
// more of a budget issue (lvaCount currently includes all
// caller and prospective callee locals). We still might be
// able to inline other callees into this caller, or inline
// this callee in other callers.
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
if (call->IsVirtual())
{
result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL);
return;
}
// Re-check this because guarded devirtualization may allow these through.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
// impMarkInlineCandidate() is expected not to mark tail prefixed calls
// and recursive tail calls as inline candidates.
noway_assert(!call->IsTailPrefixedCall());
noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call));
//
// Calling inlinee's compiler to inline the method.
//
unsigned startVars = lvaCount;
#ifdef DEBUG
if (verbose)
{
printf("Expanding INLINE_CANDIDATE in statement ");
printStmtID(fgMorphStmt);
printf(" in " FMT_BB ":\n", compCurBB->bbNum);
gtDispStmt(fgMorphStmt);
if (call->IsImplicitTailCall())
{
printf("Note: candidate is implicit tail call\n");
}
}
#endif
impInlineRoot()->m_inlineStrategy->NoteAttempt(result);
//
// Invoke the compiler to inline the call.
//
fgInvokeInlineeCompiler(call, result, createdContext);
if (result->IsFailure())
{
// Undo some changes made in anticipation of inlining...
// Zero out the used locals
memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
for (unsigned i = startVars; i < lvaCount; i++)
{
new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
}
lvaCount = startVars;
#ifdef DEBUG
if (verbose)
{
// printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
}
#endif
return;
}
#ifdef DEBUG
if (verbose)
{
// printf("After inlining lvaCount=%d.\n", lvaCount);
}
#endif
}
//------------------------------------------------------------------------
// fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp.
//
// Arguments:
// callee - The callee to check
// failReason - If this method returns false, the reason why. Can be nullptr.
//
// Return Value:
// Returns true or false based on whether the callee can be fastTailCalled
//
// Notes:
// This function is target specific and each target will make the fastTailCall
// decision differently. See the notes below.
//
// This function calls fgInitArgInfo() to initialize the arg info table, which
// is used to analyze the argument. This function can alter the call arguments
// by adding argument IR nodes for non-standard arguments.
//
// Windows Amd64:
// A fast tail call can be made whenever the number of callee arguments
// is less than or equal to the number of caller arguments, or we have four
// or fewer callee arguments. This is because, on Windows AMD64, each
// argument uses exactly one register or one 8-byte stack slot. Thus, we only
// need to count arguments, and not be concerned with the size of each
// incoming or outgoing argument.
//
// Can fast tail call examples (amd64 Windows):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal or less than the caller --
// caller(struct, struct, struct, struct, struct, struct)
// callee(int, int, int, int, int, int)
//
// -- Callee requires stack space that is less than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int)
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Windows):
//
// -- Callee requires stack space that is larger than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int, double, double, double)
//
// -- Callee has a byref struct argument --
// caller(int, int, int)
// callee(struct(size 3 bytes))
//
// Unix Amd64 && Arm64:
// A fastTailCall decision can be made whenever the callee's stack space is
// less than or equal to the caller's stack space. There are many permutations
// of when the caller and callee have different stack sizes if there are
// structs being passed to either the caller or callee.
//
// Exceptions:
// If the callee has a 9 to 16 byte struct argument and the callee has
// stack arguments, the decision will be to not fast tail call. This is
// because, before fgMorphArgs is done, it is not known whether the struct
// will be placed on the stack or enregistered. Therefore, the conservative
// decision not to fast tail call is taken. This limitation should be
// removed if/when fgMorphArgs no longer depends on fgCanFastTailCall.
//
// Can fast tail call examples (amd64 Unix):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal to the caller --
// caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte
// stack
// space
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee requires stack space that is less than the caller --
// caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte
// stack
// space
// callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Unix):
//
// -- Callee requires stack space that is larger than the caller --
// caller(float, float, float, float, float, float, float, float) -- 8 float register arguments
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee has structs which cannot be enregistered (Implementation Limitation) --
// caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register
// arguments, 24 byte stack space
// callee({ double, double, double }) -- 24 bytes stack space
//
// -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) --
// caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space
// callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space
//
// -- Caller requires stack space and nCalleeArgs > nCallerArgs (Bug) --
// caller({ double, double, double, double, double, double }) // 48 byte stack
// callee(int, int) -- 2 int registers
bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
{
#if FEATURE_FASTTAILCALL
// To reach here means that the return types of the caller and callee are tail call compatible.
// In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
{
var_types retType = info.compRetType;
assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv,
(var_types)callee->gtReturnType, callee->gtRetClsHnd,
callee->GetUnmanagedCallConv()));
}
#endif
assert(!callee->AreArgsComplete());
fgInitArgInfo(callee);
fgArgInfo* argInfo = callee->fgArgInfo;
unsigned calleeArgStackSize = 0;
unsigned callerArgStackSize = info.compArgStackSize;
auto reportFastTailCallDecision = [&](const char* thisFailReason) {
if (failReason != nullptr)
{
*failReason = thisFailReason;
}
#ifdef DEBUG
if ((JitConfig.JitReportFastTailCallDecisions()) == 1)
{
if (callee->gtCallType != CT_INDIRECT)
{
const char* methodName;
methodName = eeGetMethodFullName(callee->gtCallMethHnd);
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ",
info.compFullName, methodName);
}
else
{
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- "
"Decision: ",
info.compFullName);
}
if (thisFailReason == nullptr)
{
printf("Will fast tailcall");
}
else
{
printf("Will not fast tailcall (%s)", thisFailReason);
}
printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize);
}
else
{
if (thisFailReason == nullptr)
{
JITDUMP("[Fast tailcall decision]: Will fast tailcall\n");
}
else
{
JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason);
}
}
#endif // DEBUG
};
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment());
calleeArgStackSize += arg->GetStackByteSize();
#ifdef TARGET_ARM
if (arg->IsSplit())
{
reportFastTailCallDecision("Splitted argument in callee is not supported on ARM32");
return false;
}
#endif // TARGET_ARM
}
calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize);
#ifdef TARGET_ARM
if (compHasSplitParam)
{
reportFastTailCallDecision("Splitted argument in caller is not supported on ARM32");
return false;
}
if (compIsProfilerHookNeeded())
{
reportFastTailCallDecision("Profiler is not supported on ARM32");
return false;
}
// On ARM32 we have only one non-parameter volatile register and we need it
// for the GS security cookie check. We could technically still tailcall
// when the callee does not use all argument registers, but we keep the
// code simple here.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("Not enough registers available due to the GS security cookie check");
return false;
}
#endif
if (!opts.compFastTailCalls)
{
reportFastTailCallDecision("Configuration doesn't allow fast tail calls");
return false;
}
if (callee->IsStressTailCall())
{
reportFastTailCallDecision("Fast tail calls are not performed under tail call stress");
return false;
}
#ifdef TARGET_ARM
if (callee->IsR2RRelativeIndir() || callee->HasNonStandardAddedArgs(this))
{
reportFastTailCallDecision(
"Method with non-standard args passed in callee saved register cannot be tail called");
return false;
}
#endif
// Note on vararg methods:
// If the caller is a vararg method, we don't know the number of arguments passed by caller's caller.
// But we can be sure that the incoming arg area of a vararg caller is sufficient to hold its
// fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as
// out-going area required for callee is bounded by caller's fixed argument space.
//
// Note that the callee being a vararg method is not a problem since we can account for the params being passed.
//
// We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg
// method. This is due to the ABI differences for native vararg methods for these platforms. There is
// work required to shuffle arguments to the correct locations.
CLANG_FORMAT_COMMENT_ANCHOR;
if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs()))
{
reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64");
return false;
}
if (compLocallocUsed)
{
reportFastTailCallDecision("Localloc used");
return false;
}
#ifdef TARGET_AMD64
// Needed for Jit64 compat.
// In future, enabling fast tail calls from methods that need GS cookie
// check would require codegen side work to emit GS cookie check before a
// tail call.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("GS Security cookie check required");
return false;
}
#endif
// If the NextCallReturnAddress intrinsic is used we should do normal calls.
if (info.compHasNextCallRetAddr)
{
reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic");
return false;
}
if (callee->HasRetBufArg()) // RetBuf
{
// If callee has RetBuf param, caller too must have it.
// Otherwise go the slow route.
if (info.compRetBuffArg == BAD_VAR_NUM)
{
reportFastTailCallDecision("Callee has RetBuf but caller does not.");
return false;
}
}
// For a fast tail call the caller will use its incoming arg stack space to place
// arguments, so if the callee requires more arg stack space than is available here
// the fast tail call cannot be performed. This is common to all platforms.
// Note that the GC'ness of on stack args need not match since the arg setup area is marked
// as non-interruptible for fast tail calls.
if (calleeArgStackSize > callerArgStackSize)
{
reportFastTailCallDecision("Not enough incoming arg space");
return false;
}
// For Windows some struct parameters are copied on the local frame
// and then passed by reference. We cannot fast tail call in these situations
// as we need to keep our frame around.
if (fgCallHasMustCopyByrefParameter(callee))
{
reportFastTailCallDecision("Callee has a byref parameter");
return false;
}
reportFastTailCallDecision(nullptr);
return true;
#else // FEATURE_FASTTAILCALL
if (failReason)
*failReason = "Fast tailcalls are not supported on this platform";
return false;
#endif
}
//------------------------------------------------------------------------
// fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that
// requires a struct copy in the caller.
//
// Arguments:
// callee - The callee to check
//
// Return Value:
// Returns true or false based on whether this call has a byref parameter that
// requires a struct copy in the caller.
#if FEATURE_FASTTAILCALL
bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee)
{
fgArgInfo* argInfo = callee->fgArgInfo;
bool hasMustCopyByrefParameter = false;
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
if (arg->isStruct)
{
if (arg->passedByRef)
{
// Generally a byref arg will block tail calling, as we have to
// make a local copy of the struct for the callee.
hasMustCopyByrefParameter = true;
// If we're optimizing, we may be able to pass our caller's byref to our callee,
// and so still be able to avoid a struct copy.
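// For example (illustrative): if this method's implicit byref param 's' is simply
// forwarded, as in 'return Callee(s);', and 's' has no other appearances, we can
// pass the incoming pointer straight through instead of copying the struct.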
if (opts.OptimizationEnabled())
{
// First, see if this arg is an implicit byref param.
GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
// Yes, the arg is an implicit byref param.
const unsigned lclNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lcl);
// The param must not be promoted; if we've promoted, then the arg will be
// a local struct assembled from the promoted fields.
if (varDsc->lvPromoted)
{
JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n",
dspTreeID(arg->GetNode()), lclNum);
}
else
{
JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n",
dspTreeID(arg->GetNode()), lclNum);
// We have to worry about introducing aliases if we bypass copying
// the struct at the call. We'll do some limited analysis to see if we
// can rule this out.
const unsigned argLimit = 6;
// If this is the only appearance of the byref in the method, then
// aliasing is not possible.
//
// If no other call arg refers to this byref, and no other arg is
// a pointer which could refer to this byref, we can optimize.
//
// We only check this for calls with small numbers of arguments,
// as the analysis cost will be quadratic.
//
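// For example (illustrative): for 'Callee(s, s)', where 's' is an implicit byref
// param, bypassing both copies would let the callee observe aliasing between its
// two parameters, so we must keep the copies.
//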
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
if (totalAppearances == 1)
{
JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
else if (totalAppearances > callAppearances)
{
// lvRefCntWtd tracks the number of appearances of the arg at call sites.
// If this number doesn't match the regular ref count, there is
// a non-call appearance, and we must be conservative.
//
JITDUMP("... no, arg has %u non-call appearance(s)\n",
totalAppearances - callAppearances);
}
else if (argInfo->ArgCount() <= argLimit)
{
JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n"
"... Running alias analysis on this call's args\n",
totalAppearances);
GenTree* interferingArg = nullptr;
for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2)
{
if (index2 == index)
{
continue;
}
fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false);
JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode()));
DISPTREE(arg2->GetNode());
// Do we pass 'lcl' more than once to the callee?
if (arg2->isStruct && arg2->passedByRef)
{
GenTreeLclVarCommon* const lcl2 =
arg2->GetNode()->IsImplicitByrefParameterValue(this);
if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum()))
{
// not copying would introduce aliased implicit byref structs
// in the callee ... we can't optimize.
interferingArg = arg2->GetNode();
break;
}
else
{
JITDUMP("... arg refers to different implicit byref V%02u\n",
lcl2->GetLclNum());
continue;
}
}
// Do we pass a byref pointer which might point within 'lcl'?
//
// We can assume the 'lcl' is unaliased on entry to the
// method, so the only way we can have an aliasing byref pointer at
// the call is if 'lcl' is address taken/exposed in the method.
//
// Note even though 'lcl' is not promoted, we are in the middle
// of the promote->rewrite->undo->(morph)->demote cycle, and so
// might see references to promoted fields of 'lcl' that haven't yet
// been demoted (see fgMarkDemotedImplicitByRefArgs).
//
// So, we also need to scan all 'lcl's fields, if any, to see if they
// are exposed.
//
// When looking for aliases from other args, we check for both TYP_BYREF
// and TYP_I_IMPL typed args here. Conceptually anything that points into
// an implicit byref parameter should be TYP_BYREF, as these parameters could
// refer to boxed heap locations (say if the method is invoked by reflection)
// but there are some stack only structs (like typed references) where
// the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will
// transiently retype all simple address-of implicit parameter args as
// TYP_I_IMPL.
//
if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL))
{
JITDUMP("...arg is a byref, must run an alias check\n");
bool checkExposure = true;
bool hasExposure = false;
// See if there is any way arg could refer to a parameter struct.
GenTree* arg2Node = arg2->GetNode();
if (arg2Node->OperIs(GT_LCL_VAR))
{
GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon();
assert(arg2LclNode->GetLclNum() != lclNum);
LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode);
// Other params can't alias implicit byref params
if (arg2Dsc->lvIsParam)
{
checkExposure = false;
}
}
// Because we're checking TYP_I_IMPL above, at least
// screen out obvious things that can't cause aliases.
else if (arg2Node->IsIntegralConst())
{
checkExposure = false;
}
if (checkExposure)
{
JITDUMP(
"... not sure where byref arg points, checking if V%02u is exposed\n",
lclNum);
// arg2 might alias arg, see if we've exposed
// arg somewhere in the method.
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
// Struct as a whole is exposed, can't optimize
JITDUMP("... V%02u is exposed\n", lclNum);
hasExposure = true;
}
else if (varDsc->lvFieldLclStart != 0)
{
// This is the promoted/undone struct case.
//
// The field start is actually the local number of the promoted local,
// use it to enumerate the fields.
const unsigned promotedLcl = varDsc->lvFieldLclStart;
LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl);
JITDUMP("...promoted-unpromoted case -- also checking exposure of "
"fields of V%02u\n",
promotedLcl);
for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt;
fieldIndex++)
{
LclVarDsc* fieldDsc =
lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex);
if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed())
{
// Promoted and not yet demoted field is exposed, can't optimize
JITDUMP("... field V%02u is exposed\n",
promotedVarDsc->lvFieldLclStart + fieldIndex);
hasExposure = true;
break;
}
}
}
}
if (hasExposure)
{
interferingArg = arg2->GetNode();
break;
}
}
else
{
JITDUMP("...arg is not a byref or implicit byref (%s)\n",
varTypeName(arg2->GetNode()->TypeGet()));
}
}
if (interferingArg != nullptr)
{
JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg),
lclNum);
}
else
{
JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
}
else
{
JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n",
argInfo->ArgCount(), argLimit);
}
}
}
}
if (hasMustCopyByrefParameter)
{
// This arg requires a struct copy. No reason to keep scanning the remaining args.
break;
}
}
}
}
return hasMustCopyByrefParameter;
}
#endif
//------------------------------------------------------------------------
// fgMorphPotentialTailCall: Attempt to morph a call that the importer has
// identified as a potential tailcall to an actual tailcall and return the
// placeholder node to use in this case.
//
// Arguments:
// call - The call to morph.
//
// Return Value:
// Returns a node to use if the call was morphed into a tailcall. If this
// function returns a node the call is done being morphed and the new node
// should be used. Otherwise the call will have been demoted to a regular call
// and should go through normal morph.
//
// Notes:
// This is called only for calls that the importer has already identified as
// potential tailcalls. It will do profitability and legality checks and
// classify which kind of tailcall we are able to (or should) do, along with
// modifying the trees to perform that kind of tailcall.
//
GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
// It should either be an explicit (i.e. tail prefixed) or an implicit tail call
assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall());
// It cannot be an inline candidate
assert(!call->IsInlineCandidate());
auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) {
#ifdef DEBUG
if (verbose)
{
printf("\nRejecting tail call in morph for call ");
printTreeID(call);
printf(": %s", reason);
if (lclNum != BAD_VAR_NUM)
{
printf(" V%02u", lclNum);
}
printf("\n");
}
#endif
// for non user funcs, we have no handles to report
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), TAILCALL_FAIL, reason);
// We have checked the candidate so demote.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
};
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
failTailCall("Might turn into an intrinsic");
return nullptr;
}
#ifdef TARGET_ARM
if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
failTailCall("Non-standard calling convention");
return nullptr;
}
#endif
if (call->IsNoReturn() && !call->IsTailPrefixedCall())
{
// Such tail calls always throw an exception and we won't be able to see current
// Caller() in the stacktrace.
failTailCall("Never returns");
return nullptr;
}
#ifdef DEBUG
if (opts.compGcChecks && (info.compRetType == TYP_REF))
{
failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, "
"invalidating tailcall opportunity");
return nullptr;
}
#endif
// We have to ensure to pass the incoming retValBuf as the
// outgoing one. Using a temp will not do as this function will
// not regain control to do the copy. This can happen when inlining
// a tailcall which also has a potential tailcall in it: the IL looks
// like we can do a tailcall, but the trees generated use a temp for the inlinee's
// result. TODO-CQ: Fix this.
if (info.compRetBuffArg != BAD_VAR_NUM)
{
noway_assert(call->TypeGet() == TYP_VOID);
GenTree* retValBuf = call->gtCallArgs->GetNode();
if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg)
{
failTailCall("Need to copy return buffer");
return nullptr;
}
}
// We are still not sure whether it can be a tail call. Because, when converting
// a call to an implicit tail call, we must check that there are no locals with
// their address taken. If this is the case, we have to assume that the address
// has been leaked and the current stack frame must live until after the final
// call.
// Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note
// that lvHasLdAddrOp is much more conservative. We cannot just base it on
// IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs
// during morph stage. The reason for also checking IsAddressExposed() is that in case
// of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp.
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us
// never to be incorrect.
//
// TODO-Throughput: have a compiler level flag to indicate whether method has vars whose
// address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed()
// is set. This avoids the need for iterating through all lcl vars of the current
// method. Right now throughout the code base we are not consistently using 'set'
// method to set lvHasLdAddrOp and IsAddressExposed() flags.
bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall();
if (isImplicitOrStressTailCall && compLocallocUsed)
{
failTailCall("Localloc used");
return nullptr;
}
bool hasStructParam = false;
for (unsigned varNum = 0; varNum < lvaCount; varNum++)
{
LclVarDsc* varDsc = lvaGetDesc(varNum);
// If the method is marked as an explicit tail call we will skip the
// following three hazard checks.
// We still must check for any struct parameters and set 'hasStructParam'
// so that we won't transform the recursive tail call into a loop.
//
if (isImplicitOrStressTailCall)
{
if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Local address taken", varNum);
return nullptr;
}
if (varDsc->IsAddressExposed())
{
if (lvaIsImplicitByRefLocal(varNum))
{
// The address of the implicit-byref is a non-address use of the pointer parameter.
}
else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl))
{
// The address of the implicit-byref's field is likewise a non-address use of the pointer
// parameter.
}
else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum))
{
// This temp was used for struct promotion bookkeeping. It will not be used, and will have
// its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs.
assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl));
assert(fgGlobalMorph);
}
else
{
failTailCall("Local address taken", varNum);
return nullptr;
}
}
if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Has Struct Promoted Param", varNum);
return nullptr;
}
if (varDsc->lvPinned)
{
// A tail call removes the method from the stack, which means the pinning
// goes away for the callee. We can't allow that.
failTailCall("Has Pinned Vars", varNum);
return nullptr;
}
}
if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam)
{
hasStructParam = true;
// This prevents transforming a recursive tail call into a loop
// but doesn't prevent tail call optimization so we need to
// look at the rest of parameters.
}
}
if (!fgCheckStmtAfterTailCall())
{
failTailCall("Unexpected statements after the tail call");
return nullptr;
}
const char* failReason = nullptr;
bool canFastTailCall = fgCanFastTailCall(call, &failReason);
CORINFO_TAILCALL_HELPERS tailCallHelpers;
bool tailCallViaJitHelper = false;
if (!canFastTailCall)
{
if (call->IsImplicitTailCall())
{
// Implicit or opportunistic tail calls are always dispatched via fast tail call
// mechanism and never via tail call helper for perf.
failTailCall(failReason);
return nullptr;
}
assert(call->IsTailPrefixedCall());
assert(call->tailCallInfo != nullptr);
// We do not currently handle non-standard args except for VSD stubs.
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this))
{
failTailCall(
"Method with non-standard args passed in callee trash register cannot be tail called via helper");
return nullptr;
}
// On x86 we have a faster mechanism than the general one which we use
// in almost all cases. See fgCanTailCallViaJitHelper for more information.
if (fgCanTailCallViaJitHelper())
{
tailCallViaJitHelper = true;
}
else
{
// Make sure we can get the helpers. We do this last as the runtime
// will likely be required to generate these.
CORINFO_RESOLVED_TOKEN* token = nullptr;
CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig();
unsigned flags = 0;
if (!call->tailCallInfo->IsCalli())
{
token = call->tailCallInfo->GetToken();
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_TAILCALL_IS_CALLVIRT;
}
}
if (call->gtCallThisArg != nullptr)
{
var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet();
if (thisArgType != TYP_REF)
{
flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF;
}
}
if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags,
&tailCallHelpers))
{
failTailCall("Tail call help not available");
return nullptr;
}
}
}
// Check if we can make the tailcall a loop.
bool fastTailCallToLoop = false;
#if FEATURE_TAILCALL_OPT
// TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register
// or return type is a struct that can be passed in a register.
//
// TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through
// hidden generic context param or through keep alive thisptr), then while transforming a recursive
// call to such a method requires that the generic context stored on stack slot be updated. Right now,
// fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming
// a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the
// generic type parameters of both caller and callee generic method are the same.
if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() &&
!lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet()))
{
fastTailCallToLoop = true;
}
#endif
// Ok -- now we are committed to performing a tailcall. Report the decision.
CorInfoTailCall tailCallResult;
if (fastTailCallToLoop)
{
tailCallResult = TAILCALL_RECURSIVE;
}
else if (canFastTailCall)
{
tailCallResult = TAILCALL_OPTIMIZED;
}
else
{
tailCallResult = TAILCALL_HELPER;
}
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), tailCallResult, nullptr);
// Are we currently planning to expand the gtControlExpr as an early virtual call target?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// It isn't always profitable to expand a virtual call early
//
// We always expand the TAILCALL_HELPER type late.
// And we expand late when we have an optimized tail call
// and the this pointer needs to be evaluated into a temp.
//
if (tailCallResult == TAILCALL_HELPER)
{
// We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work
// for us to be able to expand this earlier in morph)
//
call->ClearExpandedEarly();
}
else if ((tailCallResult == TAILCALL_OPTIMIZED) &&
((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0))
{
// We generate better code when we expand this late in lower instead.
//
call->ClearExpandedEarly();
}
}
// Now actually morph the call.
compTailCallUsed = true;
// This will prevent inlining this call.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL;
if (tailCallViaJitHelper)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER;
}
#if FEATURE_TAILCALL_OPT
if (fastTailCallToLoop)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP;
}
#endif
// Mark that this is no longer a pending tailcall. We need to do this before
// we call fgMorphCall again (which happens in the fast tailcall case) to
// avoid recursing back into this method.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
#ifdef DEBUG
if (verbose)
{
printf("\nGTF_CALL_M_TAILCALL bit set for call ");
printTreeID(call);
printf("\n");
if (fastTailCallToLoop)
{
printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call ");
printTreeID(call);
printf("\n");
}
}
#endif
// For R2R we might need a different entry point for this call if we are doing a tailcall.
// The reason is that the normal delay load helper uses the return address to find the indirection
// cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM:
// We optimize delegate invocations manually in the JIT so skip this for those.
if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke())
{
info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint);
#ifdef TARGET_XARCH
// We have already computed arg info to make the fast tailcall decision, but on X64 we now
// have to pass the indirection cell, so redo arg info.
call->ResetArgInfo();
#endif
}
// If this block has a flow successor, make suitable updates.
//
BasicBlock* const nextBlock = compCurBB->GetUniqueSucc();
if (nextBlock == nullptr)
{
// No unique successor. compCurBB should be a return.
//
assert(compCurBB->bbJumpKind == BBJ_RETURN);
}
else
{
// Flow no longer reaches nextBlock from here.
//
fgRemoveRefPred(nextBlock, compCurBB);
// Adjust profile weights.
//
// Note if this is a tail call to loop, further updates
// are needed once we install the loop edge.
//
if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight())
{
// Since we have linear flow we can update the next block weight.
//
weight_t const blockWeight = compCurBB->bbWeight;
weight_t const nextWeight = nextBlock->bbWeight;
weight_t const newNextWeight = nextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextWeight >= 0)
{
// Note if we'd already morphed the IR in nextblock we might
// have done something profile sensitive that we should arguably reconsider.
//
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum,
nextWeight, newNextWeight);
nextBlock->setBBProfileWeight(newNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight);
}
// If nextBlock is not a BBJ_RETURN, it should have a unique successor that
// is a BBJ_RETURN, as we allow a little bit of flow after a tail call.
//
if (nextBlock->bbJumpKind != BBJ_RETURN)
{
BasicBlock* retBlock = nextBlock->GetUniqueSucc();
// Check if we have a sequence of GT_ASG blocks where the same variable is assigned
// to temp locals over and over.
// Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs.
//
// { GT_ASG(t_0, GT_CALL(...)) }
// { GT_ASG(t_1, t0) } (with casts on rhs potentially)
// ...
// { GT_ASG(t_n, t_(n - 1)) }
// { GT_RET t_n }
//
if (retBlock->bbJumpKind != BBJ_RETURN)
{
// Make sure the block has a single statement
assert(nextBlock->firstStmt() == nextBlock->lastStmt());
// And the root node is "ASG(LCL_VAR, LCL_VAR)"
GenTree* asgNode = nextBlock->firstStmt()->GetRootNode();
assert(asgNode->OperIs(GT_ASG));
unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
while (retBlock->bbJumpKind != BBJ_RETURN)
{
#ifdef DEBUG
Statement* nonEmptyStmt = nullptr;
for (Statement* const stmt : retBlock->Statements())
{
// Ignore NOP statements
if (!stmt->GetRootNode()->OperIs(GT_NOP))
{
// Only a single non-NOP statement is allowed
assert(nonEmptyStmt == nullptr);
nonEmptyStmt = stmt;
}
}
if (nonEmptyStmt != nullptr)
{
asgNode = nonEmptyStmt->GetRootNode();
if (!asgNode->OperIs(GT_NOP))
{
assert(asgNode->OperIs(GT_ASG));
GenTree* rhs = asgNode->gtGetOp2();
while (rhs->OperIs(GT_CAST))
{
assert(!rhs->gtOverflow());
rhs = rhs->gtGetOp1();
}
assert(lcl == rhs->AsLclVarCommon()->GetLclNum());
lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
}
}
#endif
retBlock = retBlock->GetUniqueSucc();
}
}
assert(retBlock->bbJumpKind == BBJ_RETURN);
if (retBlock->hasProfileWeight())
{
// Do similar updates here.
//
weight_t const nextNextWeight = retBlock->bbWeight;
weight_t const newNextNextWeight = nextNextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextNextWeight >= 0)
{
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, newNextNextWeight);
retBlock->setBBProfileWeight(newNextNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight);
}
}
}
}
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// We enable shared-ret tail call optimization for recursive calls even if
// FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined.
if (gtIsRecursiveCall(call))
#endif
{
// Many tailcalls will have call and ret in the same block, and thus be
// BBJ_RETURN, but if the call falls through to a ret, and we are doing a
// tailcall, change it here.
compCurBB->bbJumpKind = BBJ_RETURN;
}
GenTree* stmtExpr = fgMorphStmt->GetRootNode();
#ifdef DEBUG
// Tail call needs to be in one of the following IR forms
// Either a call stmt or
// GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..)))
// var = GT_CALL(..) or var = (GT_CAST(GT_CALL(..)))
// GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP)
// In the above,
// GT_CASTS may be nested.
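//    e.g. GT_RETURN(GT_CAST(GT_CAST(GT_CALL(..)))) is also accepted.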
genTreeOps stmtOper = stmtExpr->gtOper;
if (stmtOper == GT_CALL)
{
assert(stmtExpr == call);
}
else
{
assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
GenTree* treeWithCall;
if (stmtOper == GT_RETURN)
{
treeWithCall = stmtExpr->gtGetOp1();
}
else if (stmtOper == GT_COMMA)
{
// Second operation must be nop.
assert(stmtExpr->gtGetOp2()->IsNothingNode());
treeWithCall = stmtExpr->gtGetOp1();
}
else
{
treeWithCall = stmtExpr->gtGetOp2();
}
// Peel off casts
while (treeWithCall->gtOper == GT_CAST)
{
assert(!treeWithCall->gtOverflow());
treeWithCall = treeWithCall->gtGetOp1();
}
assert(treeWithCall == call);
}
#endif
// Store the call type for later to introduce the correct placeholder.
var_types origCallType = call->TypeGet();
GenTree* result;
if (!canFastTailCall && !tailCallViaJitHelper)
{
// For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular
// calls with (to the JIT) regular control flow so we do not need to do
// much special handling.
result = fgMorphTailCallViaHelpers(call, tailCallHelpers);
}
else
{
// Otherwise we will transform into something that does not return. For
// fast tailcalls a "jump" and for tailcall via JIT helper a call to a
// JIT helper that does not return. So peel off everything after the
// call.
Statement* nextMorphStmt = fgMorphStmt->GetNextStmt();
JITDUMP("Remove all stmts after the call.\n");
while (nextMorphStmt != nullptr)
{
Statement* stmtToRemove = nextMorphStmt;
nextMorphStmt = stmtToRemove->GetNextStmt();
fgRemoveStmt(compCurBB, stmtToRemove);
}
bool isRootReplaced = false;
GenTree* root = fgMorphStmt->GetRootNode();
if (root != call)
{
JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call));
isRootReplaced = true;
fgMorphStmt->SetRootNode(call);
}
// Avoid potential extra work for the return (for example, vzeroupper)
call->gtType = TYP_VOID;
// The runtime requires that we perform a null check on the `this` argument before
// tail calling to a virtual dispatch stub. This requirement is a consequence of limitations
// in the runtime's ability to map an AV to a NullReferenceException if
// the AV occurs in a dispatch stub that has unmanaged caller.
if (call->IsVirtualStub())
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
// Do some target-specific transformations (before we process the args,
// etc.) for the JIT helper case.
if (tailCallViaJitHelper)
{
fgMorphTailCallViaJitHelper(call);
// Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the
// argument list, invalidating the argInfo.
call->fgArgInfo = nullptr;
}
// Tail call via JIT helper: The VM can't use return address hijacking
// if we're not going to return and the helper doesn't have enough info
// to safely poll, so we poll before the tail call, if the block isn't
// already safe. Since tail call via helper is a slow mechanism it
// doesn't matter whether we emit a GC poll. This is done to be in parity
// with Jit64. Also this avoids GC info size increase if almost all
// methods are expected to be tail calls (e.g. F#).
//
// Note that we can avoid emitting GC-poll if we know that the current
// BB is dominated by a Gc-SafePoint block. But we don't have dominator
// info at this point. One option is to just add a placeholder node for
// GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block
// is dominated by a GC-SafePoint. For now it is not clear whether
// optimizing slow tail calls is worth the effort. As a low cost check,
// we check whether the first and current basic blocks are
// GC-SafePoints.
//
// Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead,
// fgSetBlockOrder() is going to mark the method as fully interruptible
// if the block containing this tail call is reachable without executing
// any call.
BasicBlock* curBlock = compCurBB;
if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
(fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock))
{
// We didn't insert a poll block, so we need to morph the call now
// (Normally it will get morphed when we get to the split poll block)
GenTree* temp = fgMorphCall(call);
noway_assert(temp == call);
}
// Fast tail call: in case of fast tail calls, we need a jmp epilog and
// hence mark it as BBJ_RETURN with BBF_JMP flag set.
noway_assert(compCurBB->bbJumpKind == BBJ_RETURN);
if (canFastTailCall)
{
compCurBB->bbFlags |= BBF_HAS_JMP;
}
else
{
// We call CORINFO_HELP_TAILCALL which does not return, so we will
// not need epilogue.
compCurBB->bbJumpKind = BBJ_THROW;
}
if (isRootReplaced)
{
// We have replaced the root node of this stmt and deleted the rest,
// but we still have the deleted, dead nodes on the `fgMorph*` stack
// if the root node was an `ASG`, `RET` or `CAST`.
// Return a zero con node to exit morphing of the old trees without asserts
// and forbid POST_ORDER morphing doing something wrong with our call.
var_types callType;
if (varTypeIsStruct(origCallType))
{
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference));
if (howToReturnStruct == SPK_ByValue)
{
callType = TYP_I_IMPL;
}
else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType))
{
callType = TYP_FLOAT;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
}
else
{
callType = origCallType;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
callType = genActualType(callType);
GenTree* zero = gtNewZeroConNode(callType);
result = fgMorphTree(zero);
}
else
{
result = call;
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code
// generation.
//
// Arguments:
// call - The call to transform
// helpers - The tailcall helpers provided by the runtime.
//
// Return Value:
// Returns the transformed node.
//
// Notes:
// This transforms
// GT_CALL
// {callTarget}
// {this}
// {args}
// into
// GT_COMMA
// GT_CALL StoreArgsStub
// {callTarget} (depending on flags provided by the runtime)
// {this} (as a regular arg)
// {args}
// GT_COMMA
// GT_CALL Dispatcher
// GT_ADDR ReturnAddress
// {CallTargetStub}
// GT_ADDR ReturnValue
// GT_LCL ReturnValue
// whenever the call node returns a value. If the call node does not return a
// value the last comma will not be there.
//
GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help)
{
// R2R requires different handling but we don't support tailcall via
// helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper
assert(!opts.IsReadyToRun());
JITDUMP("fgMorphTailCallViaHelpers (before):\n");
DISPTREE(call);
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We come this route only for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// We might or might not have called fgInitArgInfo before this point: in
// builds with FEATURE_FASTTAILCALL we will have called it when checking if
// we could do a fast tailcall, so it is possible we have added extra IR
// for non-standard args that we must get rid of. Get rid of that IR here
// and do this first as it will 'expose' the retbuf as the first arg, which
// we rely upon in fgCreateCallDispatcherAndGetResult.
call->ResetArgInfo();
GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher);
// Change the call to a call to the StoreArgs stub.
if (call->HasRetBufArg())
{
JITDUMP("Removing retbuf");
call->gtCallArgs = call->gtCallArgs->GetNext();
call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG;
}
const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0;
GenTree* doBeforeStoreArgsStub = nullptr;
GenTree* thisPtrStubArg = nullptr;
// Put 'this' in normal param list
if (call->gtCallThisArg != nullptr)
{
JITDUMP("Moving this pointer into arg list\n");
GenTree* objp = call->gtCallThisArg->GetNode();
GenTree* thisPtr = nullptr;
call->gtCallThisArg = nullptr;
// JIT will need one or two copies of "this" in the following cases:
// 1) the call needs null check;
// 2) StoreArgs stub needs the target function pointer address and if the call is virtual
// the stub also needs "this" in order to evaluate the target.
const bool callNeedsNullCheck = call->NeedsNullCheck();
const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual();
// TODO-Review: The following transformation is implemented under assumption that
// both conditions can be true. However, I could not construct such example
// where a virtual tail call would require null check. In case, if the conditions
// are mutually exclusive the following could be simplified.
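// For illustration: when "this" has side effects and both conditions hold, the code below builds
//   doBeforeStoreArgsStub = COMMA(tmp = "this", NULLCHECK(tmp))
//   thisPtr               = tmp   (placed in the regular arg list)
//   thisPtrStubArg        = tmp   (used below to compute the call target)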
if (callNeedsNullCheck || stubNeedsThisPtr)
{
// Clone "this" if "this" has no side effects.
if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0)
{
thisPtr = gtClone(objp, true);
}
// Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
if (thisPtr == nullptr)
{
const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
// tmp = "this"
doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp);
if (callNeedsNullCheck)
{
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet());
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck);
}
thisPtr = gtNewLclvNode(lclNum, objp->TypeGet());
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet());
}
}
else
{
if (callNeedsNullCheck)
{
// deref("this")
doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB);
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtClone(objp, true);
}
}
else
{
assert(stubNeedsThisPtr);
thisPtrStubArg = objp;
}
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr);
}
else
{
thisPtr = objp;
}
// During rationalization tmp="this" and null check will be materialized
// in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// We may need to pass the target, for instance for calli or generic methods
// where we pass instantiating stub.
if (stubNeedsTargetFnPtr)
{
JITDUMP("Adding target since VM requested it\n");
GenTree* target;
if (!call->IsVirtual())
{
if (call->gtCallType == CT_INDIRECT)
{
noway_assert(call->gtCallAddr != nullptr);
target = call->gtCallAddr;
}
else
{
CORINFO_CONST_LOOKUP addrInfo;
info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo);
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE);
if (addrInfo.accessType == IAT_VALUE)
{
handle = addrInfo.handle;
}
else if (addrInfo.accessType == IAT_PVALUE)
{
pIndirection = addrInfo.addr;
}
target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd);
}
}
else
{
assert(!call->tailCallInfo->GetSig()->hasTypeArg());
CORINFO_CALL_INFO callInfo;
unsigned flags = CORINFO_CALLINFO_LDFTN;
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_CALLINFO_CALLVIRT;
}
eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo);
target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo);
}
// Insert target as last arg
GenTreeCall::Use** newArgSlot = &call->gtCallArgs;
while (*newArgSlot != nullptr)
{
newArgSlot = &(*newArgSlot)->NextRef();
}
*newArgSlot = gtNewCallArgs(target);
}
// This is now a direct call to the store args stub and not a tailcall.
call->gtCallType = CT_USER_FUNC;
call->gtCallMethHnd = help.hStoreArgs;
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV);
// The store-args stub returns no value.
call->gtRetClsHnd = nullptr;
call->gtType = TYP_VOID;
call->gtReturnType = TYP_VOID;
GenTree* callStoreArgsStub = call;
if (doBeforeStoreArgsStub != nullptr)
{
callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub);
}
GenTree* finalTree =
gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult);
finalTree = fgMorphTree(finalTree);
JITDUMP("fgMorphTailCallViaHelpers (after):\n");
DISPTREE(finalTree);
return finalTree;
}
//------------------------------------------------------------------------
// fgCreateCallDispatcherAndGetResult: Given a call
// CALL
// {callTarget}
// {retbuf}
// {this}
// {args}
// create a similarly typed node that calls the tailcall dispatcher and returns
// the result, as in the following:
// COMMA
// CALL TailCallDispatcher
// ADDR ReturnAddress
// &CallTargetFunc
// ADDR RetValue
// RetValue
// If the call has type TYP_VOID, only create the CALL node.
//
// Arguments:
// origCall - the call
// callTargetStubHnd - the handle of the CallTarget function (this is a special
// IL stub created by the runtime)
// dispatcherHnd - the handle of the tailcall dispatcher function
//
// Return Value:
// A node that can be used in place of the original call.
//
GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd)
{
GenTreeCall* callDispatcherNode =
gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo());
// The dispatcher has signature
// void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue)
// Add return value arg.
GenTree* retValArg;
GenTree* retVal = nullptr;
unsigned int newRetLcl = BAD_VAR_NUM;
GenTree* copyToRetBufNode = nullptr;
if (origCall->HasRetBufArg())
{
JITDUMP("Transferring retbuf\n");
GenTree* retBufArg = origCall->gtCallArgs->GetNode();
assert(info.compRetBuffArg != BAD_VAR_NUM);
assert(retBufArg->OperIsLocal());
assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg);
// Caller return buffer argument retBufArg can point to the GC heap while the dispatcher expects
// the return value argument retValArg to point to the stack.
// We use a temporary stack allocated return buffer to hold the value during the dispatcher call
// and copy the value back to the caller return buffer after that.
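// Conceptually, the IR assembled further below amounts to:
//   tmpRetBuf = <stack-allocated temp>;
//   Dispatcher(..., &tmpRetBuf);
//   *callerRetBuf = tmpRetBuf;     // copyToRetBufNode
//   result = callerRetBuf;         // only if the call is non-void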
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer"));
constexpr bool unsafeValueClsCheck = false;
lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck);
lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet();
retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType));
var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet();
GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType);
GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr);
GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType);
constexpr bool isVolatile = false;
constexpr bool isCopyBlock = true;
copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock);
if (origCall->gtType != TYP_VOID)
{
retVal = gtClone(retBufArg);
}
}
else if (origCall->gtType != TYP_VOID)
{
JITDUMP("Creating a new temp for the return value\n");
newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher"));
if (varTypeIsStruct(origCall->gtType))
{
lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false);
}
else
{
// Since we pass a reference to the return value to the dispatcher
// we need to use the real return type so we can normalize it on
// load when we return it.
lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType;
}
lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
retValArg =
gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)));
retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType));
if (varTypeIsStruct(origCall->gtType))
{
retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv());
}
}
else
{
JITDUMP("No return value so using null pointer as arg\n");
retValArg = gtNewZeroConNode(TYP_I_IMPL);
}
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs);
// Add callTarget
callDispatcherNode->gtCallArgs =
gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd),
callDispatcherNode->gtCallArgs);
// Add the caller's return address slot.
if (lvaRetAddrVar == BAD_VAR_NUM)
{
lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address"));
lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL;
lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
}
GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL));
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs);
GenTree* finalTree = callDispatcherNode;
if (copyToRetBufNode != nullptr)
{
finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode);
}
if (origCall->gtType == TYP_VOID)
{
return finalTree;
}
assert(retVal != nullptr);
finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal);
// The JIT seems to want to CSE this comma and messes up multi-reg ret
// values in the process. Just avoid CSE'ing this tree entirely in that
// case.
if (origCall->HasMultiRegRetVal())
{
finalTree->gtFlags |= GTF_DONT_CSE;
}
return finalTree;
}
//------------------------------------------------------------------------
// getLookupTree: get a lookup tree
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// handleFlags - flags to set on the result node
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the lookup tree
//
GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}
return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle);
}
//------------------------------------------------------------------------
// getRuntimeLookupTree: get a tree for a runtime lookup
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the runtime lookup tree
//
GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
assert(!compIsForInlining());
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be
// used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array.
if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull ||
pRuntimeLookup->testForFixup)
{
// If the first condition is true, runtime lookup tree is available only via the run-time helper function.
// TODO-CQ: If the second or third condition is true, we are always using the slow path since we can't
// introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper.
// The long-term solution is to introduce a new node representing a runtime lookup, create instances
// of that node both in the importer and here, and expand the node in lower (introducing control flow if
// necessary).
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup,
getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind),
compileTimeHandle);
}
GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack));
auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* {
if (!((*tree)->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(*tree, true);
if (clone)
{
return clone;
}
}
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
stmts.Push(gtNewTempAssign(temp, *tree));
*tree = gtNewLclvNode(temp, lvaGetActualType(temp));
return gtNewLclvNode(temp, lvaGetActualType(temp));
};
// Apply repeated indirections
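// For illustration (hypothetical offsets): with two indirections, offsets {0x10, 0x20} and no
// indirect-offset flags, the loop below plus the final dereference produce:
//   IND(ADD(IND(ADD(ctx, 0x10)), 0x20))
// where "ctx" is the runtime context tree obtained above.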
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
GenTree* preInd = nullptr;
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset"));
}
if (i != 0)
{
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result);
}
if (pRuntimeLookup->offsets[i] != 0)
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
assert(!pRuntimeLookup->testForNull);
if (pRuntimeLookup->indirections > 0)
{
assert(!pRuntimeLookup->testForFixup);
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
}
// Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result)))
while (!stmts.Empty())
{
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result);
}
DISPTREE(result);
return result;
}
//------------------------------------------------------------------------
// getVirtMethodPointerTree: get a tree for a virtual method pointer
//
// Arguments:
// thisPtr - tree representing `this` pointer
// pResolvedToken - pointer to the resolved token of the method
// pCallInfo - pointer to call info
//
// Return Value:
// A node representing the virtual method pointer
GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo)
{
GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true);
GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false);
GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc);
return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// getTokenHandleTree: get a handle tree for a token
//
// Arguments:
// pResolvedToken - token to get a handle for
// parent - whether parent should be imported
//
// Return Value:
// A node representing the virtual method pointer
GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent)
{
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo);
GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for tail call via JIT helper.
*/
void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call)
{
JITDUMP("fgMorphTailCallViaJitHelper (before):\n");
DISPTREE(call);
// For the helper-assisted tail calls, we need to push all the arguments
// into a single list, and then add a few extra at the beginning or end.
//
// For x86, the tailcall helper is defined as:
//
// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
// callTarget)
//
// Note that the special arguments are on the stack, whereas the function arguments follow
// the normal convention: there might be register arguments in ECX and EDX. The stack will
// look like (highest address at the top):
// first normal stack argument
// ...
// last normal stack argument
// numberOfOldStackArgs
// numberOfNewStackArgs
// flags
// callTarget
//
// Each special arg is 4 bytes.
//
// 'flags' is a bitmask where:
// 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all
// callee-saved registers for tailcall functions. Note that the helper assumes
// that the callee-saved registers live immediately below EBP, and must have been
// pushed in this order: EDI, ESI, EBX.
// 2 == call target is a virtual stub dispatch.
//
// The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details
// on the custom calling convention.
// Check for PInvoke call types that we don't handle in codegen yet.
assert(!call->IsUnmanaged());
assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr));
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We come this route only for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// First move the 'this' pointer (if any) onto the regular arg list. We do this because
// we are going to prepend special arguments onto the argument list (for non-x86 platforms),
// and thus shift where the 'this' pointer will be passed to a later argument slot. In
// addition, for all platforms, we are going to change the call into a helper call. Our code
// generation code for handling calls to helpers does not handle 'this' pointers. So, when we
// do this transformation, we must explicitly create a null 'this' pointer check, if required,
// since special 'this' pointer handling will no longer kick in.
//
// Some call types, such as virtual vtable calls, require creating a call address expression
// that involves the "this" pointer. Lowering will sometimes create an embedded statement
// to create a temporary that is assigned to the "this" pointer expression, and then use
// that temp to create the call address expression. This temp creation embedded statement
// will occur immediately before the "this" pointer argument, and then will be used for both
// the "this" pointer argument as well as the call address expression. In the normal ordering,
// the embedded statement establishing the "this" pointer temp will execute before both uses
// of the temp. However, for tail calls via a helper, we move the "this" pointer onto the
// normal call argument list, and insert a placeholder which will hold the call address
// expression. For non-x86, things are ok, because the order of execution of these is not
// altered. However, for x86, the call address expression is inserted as the *last* argument
// in the argument list, *after* the "this" pointer. It will be put on the stack, and be
// evaluated first. To ensure we don't end up with out-of-order temp definition and use,
// for those cases where call lowering creates an embedded form temp of "this", we will
// create a temp here, early, that will later get morphed correctly.
if (call->gtCallThisArg != nullptr)
{
GenTree* thisPtr = nullptr;
GenTree* objp = call->gtCallThisArg->GetNode();
call->gtCallThisArg = nullptr;
if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR))
{
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", tmp)
var_types vt = objp->TypeGet();
GenTree* tmp = gtNewLclvNode(lclNum, vt);
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp);
objp = thisPtr;
}
if (call->NeedsNullCheck())
{
// clone "this" if "this" has no side effects.
if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT))
{
thisPtr = gtClone(objp, true);
}
var_types vt = objp->TypeGet();
if (thisPtr == nullptr)
{
// create a temp if either "this" has side effects or "this" is too complex to clone.
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, vt);
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck);
// COMMA(COMMA(tmp = "this", deref(tmp)), tmp)
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
}
else
{
// thisPtr = COMMA(deref("this"), "this")
GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB);
thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true));
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
else
{
thisPtr = objp;
}
// TODO-Cleanup: we leave it as a virtual stub call to
// use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here
// and change `LowerCall` to recognize it as a direct call.
// During rationalization tmp="this" and null check will
// materialize as embedded stmts in right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will
// append to the list.
GenTreeCall::Use** ppArg = &call->gtCallArgs;
for (GenTreeCall::Use& use : call->Args())
{
ppArg = &use.NextRef();
}
assert(ppArg != nullptr);
assert(*ppArg == nullptr);
unsigned nOldStkArgsWords =
(compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the flags.
// The constant will be replaced.
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg1);
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the real call target that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg0);
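// At this point the argument list ends with the four special args:
//   ..., numberOfOldStackArgs, <numberOfNewStackArgs placeholder>, <flags placeholder>, <callTarget placeholder>
// The placeholder constants (9, 8, 7) are replaced during Lowering.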
// It is now a varargs tail call.
call->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
call->gtFlags &= ~GTF_CALL_POP_ARGS;
// The function is responsible for doing explicit null check when it is necessary.
assert(!call->NeedsNullCheck());
JITDUMP("fgMorphTailCallViaJitHelper (after):\n");
DISPTREE(call);
}
//------------------------------------------------------------------------
// fgGetStubAddrArg: Return the virtual stub address for the given call.
//
// Notes:
// the JIT must place the address of the stub used to load the call target,
// the "stub indirection cell", in special call argument with special register.
//
// Arguments:
// call - a call that needs virtual stub dispatching.
//
// Return Value:
// addr tree with set register requirements.
//
GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call)
{
assert(call->IsVirtualStub());
GenTree* stubAddrArg;
if (call->gtCallType == CT_INDIRECT)
{
stubAddrArg = gtClone(call->gtCallAddr, true);
}
else
{
assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
ssize_t addr = ssize_t(call->gtStubCallStubAddr);
stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
}
assert(stubAddrArg != nullptr);
stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg());
return stubAddrArg;
}
//------------------------------------------------------------------------------
// fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that
// corresponds to the argument to a recursive call.
//
// Notes:
// Due to non-standard args this is not just fgArgTabEntry::argNum.
// For example, in R2R compilations we will have added a non-standard
// arg for the R2R indirection cell.
//
// Arguments:
// call - the call
// argTabEntry - the arg
//
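// Example (hypothetical):
//   If an R2R indirection cell was added late as a non-standard arg with argNum 0, then an IL
//   parameter whose fgArgTabEntry::argNum is 2 maps to caller parameter lcl num 1.
//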
unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry)
{
fgArgInfo* argInfo = call->fgArgInfo;
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
unsigned numToRemove = 0;
for (unsigned i = 0; i < argCount; i++)
{
fgArgTabEntry* arg = argTable[i];
// Late-added non-standard args are extra args that do not map to IL parameters and that we should not reassign.
if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate())
continue;
if (arg->argNum < argTabEntry->argNum)
numToRemove++;
}
return argTabEntry->argNum - numToRemove;
}
//------------------------------------------------------------------------------
// fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop.
//
//
// Arguments:
// block - basic block ending with a recursive fast tail call
// recursiveTailCall - recursive tail call to transform
//
// Notes:
// The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop.
void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall)
{
assert(recursiveTailCall->IsTailCallConvertibleToLoop());
Statement* lastStmt = block->lastStmt();
assert(recursiveTailCall == lastStmt->GetRootNode());
// Transform recursive tail call into a loop.
Statement* earlyArgInsertionPoint = lastStmt;
const DebugInfo& callDI = lastStmt->GetDebugInfo();
// Hoist arg setup statement for the 'this' argument.
GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg;
if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode())
{
Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt);
}
// All arguments whose trees may involve caller parameter local variables need to be assigned to temps first;
// then the temps need to be assigned to the method parameters. This is done so that the caller
// parameters are not re-assigned before call arguments depending on them are evaluated.
// tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of
// where the next temp or parameter assignment should be inserted.
// In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first
// while the second call argument (const 1) doesn't.
// Basic block before tail recursion elimination:
// ***** BB04, stmt 1 (top level)
// [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013)
// [000033] --C - G------ - \--* call void RecursiveMethod
// [000030] ------------ | / --* const int - 1
// [000031] ------------arg0 in rcx + --* +int
// [000029] ------------ | \--* lclVar int V00 arg1
// [000032] ------------arg1 in rdx \--* const int 1
//
//
// Basic block after tail recursion elimination:
// ***** BB04, stmt 1 (top level)
// [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000030] ------------ | / --* const int - 1
// [000031] ------------ | / --* +int
// [000029] ------------ | | \--* lclVar int V00 arg1
// [000050] - A---------- \--* = int
// [000049] D------N---- \--* lclVar int V02 tmp0
//
// ***** BB04, stmt 2 (top level)
// [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000052] ------------ | / --* lclVar int V02 tmp0
// [000054] - A---------- \--* = int
// [000053] D------N---- \--* lclVar int V00 arg0
// ***** BB04, stmt 3 (top level)
// [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000032] ------------ | / --* const int 1
// [000057] - A---------- \--* = int
// [000056] D------N---- \--* lclVar int V01 arg1
Statement* tmpAssignmentInsertionPoint = lastStmt;
Statement* paramAssignmentInsertionPoint = lastStmt;
// Process early args. They may contain both setup statements for late args and actual args.
// Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum
// below has the correct second argument.
int earlyArgIndex = (thisArg == nullptr) ? 0 : 1;
for (GenTreeCall::Use& use : recursiveTailCall->Args())
{
GenTree* earlyArg = use.GetNode();
if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode())
{
if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0)
{
// This is a setup node so we need to hoist it.
Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt);
}
else
{
// This is an actual argument that needs to be assigned to the corresponding caller parameter.
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
}
}
earlyArgIndex++;
}
// Process late args.
int lateArgIndex = 0;
for (GenTreeCall::Use& use : recursiveTailCall->LateArgs())
{
// A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter.
GenTree* lateArg = use.GetNode();
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
lateArgIndex++;
}
// If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that
// compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that
// block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here.
if (!info.compIsStatic && (lvaArg0Var != info.compThisArg))
{
var_types thisType = lvaTable[info.compThisArg].TypeGet();
GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType);
GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType));
Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt);
}
// If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog
// but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization
// for all non-parameter IL locals as well as temp structs with GC fields.
// Liveness phase will remove unnecessary initializations.
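// For example, an int temp gets "lcl = 0" while a struct temp with GC fields gets a zero init
// block; these statements end up at the end of this block, just before the loop back edge.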
if (info.compInitMem || compSuppressedZeroInit)
{
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
{
#if FEATURE_FIXED_OUT_ARGS
if (varNum == lvaOutgoingArgSpaceVar)
{
continue;
}
#endif // FEATURE_FIXED_OUT_ARGS
if (!varDsc->lvIsParam)
{
var_types lclType = varDsc->TypeGet();
bool isUserLocal = (varNum < info.compLocalsCount);
bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr());
bool hadSuppressedInit = varDsc->lvSuppressedZeroInit;
if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit)
{
GenTree* lcl = gtNewLclvNode(varNum, lclType);
GenTree* init = nullptr;
if (varTypeIsStruct(lclType))
{
const bool isVolatile = false;
const bool isCopyBlock = false;
init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock);
init = fgMorphInitBlock(init);
}
else
{
GenTree* zero = gtNewZeroConNode(genActualType(lclType));
init = gtNewAssignNode(lcl, zero);
}
Statement* initStmt = gtNewStmt(init, callDI);
fgInsertStmtBefore(block, lastStmt, initStmt);
}
}
}
}
// Remove the call
fgRemoveStmt(block, lastStmt);
// Set the loop edge.
if (opts.IsOSR())
{
// Todo: this may not look like a viable loop header.
// Might need the moral equivalent of a scratch BB.
block->bbJumpDest = fgEntryBB;
}
else
{
// Ensure we have a scratch block and then target the next
// block. Loop detection needs to see a pred out of the loop,
// so mark the scratch block BBF_DONT_REMOVE to prevent empty
// block removal on it.
fgEnsureFirstBBisScratch();
fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
block->bbJumpDest = fgFirstBB->bbNext;
}
// Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
}
//------------------------------------------------------------------------------
// fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter.
//
//
// Arguments:
// arg - argument to assign
// argTabEntry - argument table entry corresponding to arg
// lclParamNum - the lcl num of the parameter
// block - basic block the call is in
// callDI - debug info attached to the call
// tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary)
// paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted
//
// Return Value:
// parameter assignment statement if one was inserted; nullptr otherwise.
Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint)
{
// Call arguments should be assigned to temps first and then the temps should be assigned to parameters because
// some argument trees may reference parameters directly.
GenTree* argInTemp = nullptr;
bool needToAssignParameter = true;
// TODO-CQ: enable calls with struct arguments passed in registers.
noway_assert(!varTypeIsStruct(arg->TypeGet()));
if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl())
{
// The argument is already assigned to a temp or is a const.
argInTemp = arg;
}
else if (arg->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = arg->AsLclVar()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (!varDsc->lvIsParam)
{
// The argument is a non-parameter local so it doesn't need to be assigned to a temp.
argInTemp = arg;
}
else if (lclNum == lclParamNum)
{
// The argument is the same parameter local that we were about to assign so
// we can skip the assignment.
needToAssignParameter = false;
}
}
// TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve
// any caller parameters. Some common cases are handled above but we may be able to eliminate
// more temp assignments.
Statement* paramAssignStmt = nullptr;
if (needToAssignParameter)
{
if (argInTemp == nullptr)
{
// The argument is not assigned to a temp. We need to create a new temp and insert an assignment.
// TODO: we can avoid a temp assignment if we can prove that the argument tree
// doesn't involve any caller parameters.
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp"));
lvaTable[tmpNum].lvType = arg->gtType;
GenTree* tempSrc = arg;
GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType);
GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc);
Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI);
fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt);
argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType);
}
// Now assign the temp to the parameter.
const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum);
assert(paramDsc->lvIsParam);
GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType);
GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp);
paramAssignStmt = gtNewStmt(paramAssignNode, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt);
}
return paramAssignStmt;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for code generation.
*/
GenTree* Compiler::fgMorphCall(GenTreeCall* call)
{
if (call->CanTailCall())
{
GenTree* newNode = fgMorphPotentialTailCall(call);
if (newNode != nullptr)
{
return newNode;
}
assert(!call->CanTailCall());
#if FEATURE_MULTIREG_RET
if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet()))
{
// The tail call has been rejected so we must finish the work deferred
// by impFixupCallStructReturn for multi-reg-returning calls and transform
// ret call
// into
// temp = call
// ret temp
// Force re-evaluating the argInfo as the return argument has changed.
call->ResetArgInfo();
// Create a new temp.
unsigned tmpNum =
lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call)."));
lvaTable[tmpNum].lvIsMultiRegRet = true;
CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd;
assert(structHandle != NO_CLASS_HANDLE);
const bool unsafeValueClsCheck = false;
lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck);
var_types structType = lvaTable[tmpNum].lvType;
GenTree* dst = gtNewLclvNode(tmpNum, structType);
GenTree* assg = gtNewAssignNode(dst, call);
assg = fgMorphTree(assg);
// Create the assignment statement and insert it before the current statement.
Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo());
fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt);
// Return the temp.
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
result->gtFlags |= GTF_DONT_CSE;
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
#ifdef DEBUG
if (verbose)
{
printf("\nInserting assignment of a multi-reg call result to a temp:\n");
gtDispStmt(assgStmt);
}
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
return result;
}
#endif
}
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)
#ifdef FEATURE_READYTORUN
|| call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
#endif
) &&
(call == fgMorphStmt->GetRootNode()))
{
// This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result.
// Transform it into a null check.
GenTree* thisPtr = call->gtCallArgs->GetNode();
GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB);
return fgMorphTree(nullCheck);
}
noway_assert(call->gtOper == GT_CALL);
//
// Only count calls once (only in the global morph phase)
//
if (fgGlobalMorph)
{
if (call->gtCallType == CT_INDIRECT)
{
optCallCount++;
optIndirectCallCount++;
}
else if (call->gtCallType == CT_USER_FUNC)
{
optCallCount++;
if (call->IsVirtual())
{
optIndirectCallCount++;
}
}
}
// Couldn't inline - remember that this BB contains method calls
// Mark the block as a GC safe point for the call if possible.
// In the event the call indicates the block isn't a GC safe point
// and the call is unmanaged with a GC transition suppression request
// then insert a GC poll.
CLANG_FORMAT_COMMENT_ANCHOR;
if (IsGcSafePoint(call))
{
compCurBB->bbFlags |= BBF_GC_SAFE_POINT;
}
// Regardless of the state of the basic block with respect to GC safe point,
// we will always insert a GC Poll for scenarios involving a suppressed GC
// transition. Only mark the block for GC Poll insertion on the first morph.
if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition())
{
compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT);
optMethodFlags |= OMF_NEEDS_GCPOLLS;
}
// Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag
//
// We need to do these before the arguments are morphed
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC))
{
// See if this is foldable
GenTree* optTree = gtFoldExprCall(call);
// If we optimized, morph the result
if (optTree != call)
{
return fgMorphTree(optTree);
}
}
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
// Process the "normal" argument list
call = fgMorphArgs(call);
noway_assert(call->gtOper == GT_CALL);
// Assign DEF flags if it produces a definition from "return buffer".
fgAssignSetVarDef(call);
if (call->OperRequiresAsgFlag())
{
call->gtFlags |= GTF_ASG;
}
// Should we expand this virtual method call target early here?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// We only expand the Vtable Call target once in the global morph phase
if (fgGlobalMorph)
{
assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once
call->gtControlExpr = fgExpandVirtualVtableCallTarget(call);
}
// We always have to morph or re-morph the control expr
//
call->gtControlExpr = fgMorphTree(call->gtControlExpr);
// Propagate any gtFlags into the call
call->gtFlags |= call->gtControlExpr->gtFlags;
}
// Morph stelem.ref helper call to store a null value, into a store into an array without the helper.
// This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
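// In effect, CORINFO_HELP_ARRADDR_ST(arr, index, null) becomes a plain "arr[index] = null"
// assignment, since storing a null reference cannot violate array covariance.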
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
{
GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode();
if (value->IsIntegralConst(0))
{
assert(value->OperGet() == GT_CNS_INT);
GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode();
GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode();
// Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy
// the spill trees as well if necessary.
GenTreeOp* argSetup = nullptr;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* const arg = use.GetNode();
if (arg->OperGet() != GT_ASG)
{
continue;
}
assert(arg != arr);
assert(arg != index);
arg->gtFlags &= ~GTF_LATE_ARG;
GenTree* op1 = argSetup;
if (op1 == nullptr)
{
op1 = gtNewNothingNode();
#if DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg);
#if DEBUG
argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
#ifdef DEBUG
auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult {
(*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
return WALK_CONTINUE;
};
fgWalkTreePost(&arr, resetMorphedFlag);
fgWalkTreePost(&index, resetMorphedFlag);
fgWalkTreePost(&value, resetMorphedFlag);
#endif // DEBUG
GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index);
GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value);
GenTree* result = fgMorphTree(arrStore);
if (argSetup != nullptr)
{
result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result);
#if DEBUG
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return result;
}
}
if (call->IsNoReturn())
{
//
// If we know that the call does not return then we can set fgRemoveRestOfBlock
// to remove all subsequent statements and change the call's basic block to BBJ_THROW.
// As a result the compiler won't need to preserve live registers across the call.
//
// This isn't needed for tail calls as there shouldn't be any code after the call anyway.
// Besides, the tail call code is part of the epilog and converting the block to
// BBJ_THROW would result in the tail call being dropped as the epilog is generated
// only for BBJ_RETURN blocks.
//
if (!call->IsTailCall())
{
fgRemoveRestOfBlock = true;
}
}
return call;
}
/*****************************************************************************
*
* Expand and return the call target address for a VirtualCall
* The code here should match that generated by LowerVirtualVtableCall
*/
GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call)
{
GenTree* result;
JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
noway_assert(call->gtCallType == CT_USER_FUNC);
// get a reference to the thisPtr being passed
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0);
GenTree* thisPtr = thisArgTabEntry->GetNode();
// fgMorphArgs must enforce this invariant by creating a temp
//
assert(thisPtr->OperIsLocal());
// Make a copy of the thisPtr by cloning
//
thisPtr = gtClone(thisPtr, true);
noway_assert(thisPtr != nullptr);
// Get hold of the vtable offset
unsigned vtabOffsOfIndirection;
unsigned vtabOffsAfterIndirection;
bool isRelative;
info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
&isRelative);
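// In the common (non-relative) case with a chunk indirection, the expansion below amounts to:
//   result = [[[thisPtr] + vtabOffsOfIndirection] + vtabOffsAfterIndirection]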
// Dereference the this pointer to obtain the method table, it is called vtab below
GenTree* vtab;
assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable
vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr);
vtab->gtFlags |= GTF_IND_INVARIANT;
// Get the appropriate vtable chunk
if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
{
// Note this isRelative code path is currently never executed
// as the VM doesn't ever return: isRelative == true
//
if (isRelative)
{
// MethodTable offset is a relative pointer.
//
// Additional temporary variable is used to store virtual table pointer.
// Address of method is obtained by the next computations:
//
// Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
// vtable-1st-level-indirection):
// tmp = vtab
//
// Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
// result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
//
//
// When isRelative is true we need to setup two temporary variables
// var1 = vtab
// var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
// result = [var2] + var2
//
unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab"));
unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative"));
GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab
// [tmp + vtabOffsOfIndirection]
GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false);
tmpTree1->gtFlags |= GTF_IND_NONFAULTING;
tmpTree1->gtFlags |= GTF_IND_INVARIANT;
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection
GenTree* tmpTree2 =
gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL));
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1);
GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression>
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2]
result->gtFlags |= GTF_IND_NONFAULTING;
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2
// Now stitch together the two assignment and the calculation of result into a single tree
GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result);
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree);
}
else
{
// result = [vtab + vtabOffsOfIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
}
else
{
result = vtab;
assert(!isRelative);
}
if (!isRelative)
{
// Load the function address
// result = [result + vtabOffsAfterIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL));
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
}
return result;
}
/*****************************************************************************
*
* Transform the given constant tree for code generation.
*/
GenTree* Compiler::fgMorphConst(GenTree* tree)
{
assert(tree->OperIsConst());
/* Clear any exception flags or other unnecessary flags
* that may have been set before folding this node to a constant */
tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);
if (!tree->OperIs(GT_CNS_STR))
{
return tree;
}
if (tree->AsStrCon()->IsStringEmptyField())
{
LPVOID pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
return fgMorphTree(gtNewStringLiteralNode(iat, pValue));
}
// TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will
// guarantee slow performance for that block. Instead cache the return value
// of CORINFO_HELP_STRCNS and check the cache first, giving reasonable perf.
bool useLazyStrCns = false;
if (compCurBB->bbJumpKind == BBJ_THROW)
{
useLazyStrCns = true;
}
else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall())
{
// Quick check: if the root node of the current statement happens to be a noreturn call.
GenTreeCall* call = compCurStmt->GetRootNode()->AsCall();
useLazyStrCns = call->IsNoReturn() || fgIsThrow(call);
}
if (useLazyStrCns)
{
CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd);
if (helper != CORINFO_HELP_UNDEF)
{
// For un-important blocks, we want to construct the string lazily
GenTreeCall::Use* args;
if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE)
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT));
}
else
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT),
gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd));
}
tree = gtNewHelperCallNode(helper, TYP_REF, args);
return fgMorphTree(tree);
}
}
assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd));
LPVOID pValue;
InfoAccessType iat =
info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue);
tree = gtNewStringLiteralNode(iat, pValue);
return fgMorphTree(tree);
}
//------------------------------------------------------------------------
// fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar.
//
// Arguments:
// obj - the obj node.
// destroyNodes -- destroy nodes that are optimized away
//
// Return value:
// GenTreeLclVar if the obj can be replaced by it, null otherwise.
//
// Notes:
// TODO-CQ: currently this transformation is done only under copy block,
//   but it is beneficial to do for each OBJ node. However, `PUT_ARG_STACK`
// for some platforms does not expect struct `LCL_VAR` as a source, so
// it needs more work.
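//   For example, "OBJ struct(ADDR(LCL_VAR struct V01))" with a compatible layout folds to
//   "LCL_VAR struct V01".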
//
GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes)
{
if (opts.OptimizationEnabled())
{
GenTree* op1 = obj->Addr();
assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity");
if (op1->OperIs(GT_ADDR))
{
GenTreeUnOp* addr = op1->AsUnOp();
GenTree* addrOp = addr->gtGetOp1();
if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR))
{
GenTreeLclVar* lclVar = addrOp->AsLclVar();
ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout();
ClassLayout* objLayout = obj->GetLayout();
if (ClassLayout::AreCompatible(lclVarLayout, objLayout))
{
#ifdef DEBUG
CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle();
assert(objClsHandle != NO_CLASS_HANDLE);
if (verbose)
{
CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar);
printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar));
printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different"));
}
#endif
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
lclVar->gtFlags &= ~GTF_DONT_CSE;
lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE);
if (destroyNodes)
{
DEBUG_DESTROY_NODE(obj);
DEBUG_DESTROY_NODE(addr);
}
return lclVar;
}
}
}
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_LEAF tree for code generation.
*/
GenTree* Compiler::fgMorphLeaf(GenTree* tree)
{
assert(tree->OperKind() & GTK_LEAF);
if (tree->gtOper == GT_LCL_VAR)
{
const bool forceRemorph = false;
return fgMorphLocalVar(tree, forceRemorph);
}
else if (tree->gtOper == GT_LCL_FLD)
{
if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(),
tree->AsLclFld()->GetLclOffs());
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
}
else if (tree->gtOper == GT_FTN_ADDR)
{
GenTreeFptrVal* fptrValTree = tree->AsFptrVal();
// A function pointer address is being used. Let the VM know if this is the
// target of a Delegate or a raw function pointer.
bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget;
CORINFO_CONST_LOOKUP addrInfo;
#ifdef FEATURE_READYTORUN
if (fptrValTree->gtEntryPoint.addr != nullptr)
{
addrInfo = fptrValTree->gtEntryPoint;
}
else
#endif
{
info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo);
}
GenTree* indNode = nullptr;
switch (addrInfo.accessType)
{
case IAT_PPVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true);
// Add the second indirection
indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode);
// This indirection won't cause an exception.
indNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
break;
case IAT_PVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true);
break;
case IAT_VALUE:
// Refer to gtNewIconHandleNode() as the template for constructing a constant handle
//
tree->SetOper(GT_CNS_INT);
tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
tree->gtFlags |= GTF_ICON_FTN_ADDR;
break;
default:
noway_assert(!"Unknown addrInfo.accessType");
}
if (indNode != nullptr)
{
DEBUG_DESTROY_NODE(tree);
tree = fgMorphTree(indNode);
}
}
return tree;
}
void Compiler::fgAssignSetVarDef(GenTree* tree)
{
GenTreeLclVarCommon* lclVarCmnTree;
bool isEntire = false;
if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
{
if (isEntire)
{
lclVarCmnTree->gtFlags |= GTF_VAR_DEF;
}
else
{
// We consider partial definitions to be modeled as uses followed by definitions.
// This captures the idea that preceding defs are not necessarily made redundant
// by this definition.
lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG);
}
}
}
//------------------------------------------------------------------------
// fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment
//
// Arguments:
// tree - The block assignment to be possibly morphed
//
// Return Value:
// The modified tree if successful, nullptr otherwise.
//
// Assumptions:
// 'tree' must be a block assignment.
//
// Notes:
// If successful, this method always returns the incoming tree, modifying only
// its arguments.
//
GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
{
// This must be a block assignment.
noway_assert(tree->OperIsBlkOp());
var_types asgType = tree->TypeGet();
GenTree* asg = tree;
GenTree* dest = asg->gtGetOp1();
GenTree* src = asg->gtGetOp2();
unsigned destVarNum = BAD_VAR_NUM;
LclVarDsc* destVarDsc = nullptr;
GenTree* destLclVarTree = nullptr;
bool isCopyBlock = asg->OperIsCopyBlkOp();
bool isInitBlock = !isCopyBlock;
unsigned size = 0;
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
if (dest->gtEffectiveVal()->OperIsBlk())
{
GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk();
size = lhsBlk->Size();
if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree))
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
}
if (lhsBlk->OperGet() == GT_OBJ)
{
clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
}
}
else
{
// Is this an enregisterable struct that is already a simple assignment?
// This can happen if we are re-morphing.
// Note that we won't do this straightaway if this is a SIMD type, since it
// may be a promoted lclVar (sometimes we promote the individual float fields of
// fixed-size SIMD).
if (dest->OperGet() == GT_IND)
{
noway_assert(asgType != TYP_STRUCT);
if (varTypeIsStruct(asgType))
{
destLclVarTree = fgIsIndirOfAddrOfLocal(dest);
}
if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR))
{
fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/);
dest->gtFlags |= GTF_DONT_CSE;
return tree;
}
}
else
{
noway_assert(dest->OperIsLocal());
destLclVarTree = dest;
}
if (destLclVarTree != nullptr)
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
if (asgType == TYP_STRUCT)
{
clsHnd = destVarDsc->GetStructHnd();
size = destVarDsc->lvExactSize;
}
}
if (asgType != TYP_STRUCT)
{
size = genTypeSize(asgType);
}
}
if (size == 0)
{
return nullptr;
}
if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
if (src->IsCall() || src->OperIsSIMD())
{
// Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413.
return nullptr;
}
if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
{
//
// See if we can do a simple transformation:
//
// GT_ASG <TYP_size>
// / \.
// GT_IND GT_IND or CNS_INT
// | |
// [dest] [src]
//
if (asgType == TYP_STRUCT)
{
// It is possible to use `initobj` to init a primitive type on the stack,
// like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`;
// in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)`
// and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real
// struct assignment.
if (size == REGSIZE_BYTES)
{
if (clsHnd == NO_CLASS_HANDLE)
{
// A register-sized cpblk can be treated as an integer assignment.
asgType = TYP_I_IMPL;
}
else
{
BYTE gcPtr;
info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
asgType = getJitGCType(gcPtr);
}
}
else
{
switch (size)
{
case 1:
asgType = TYP_BYTE;
break;
case 2:
asgType = TYP_SHORT;
break;
#ifdef TARGET_64BIT
case 4:
asgType = TYP_INT;
break;
#endif // TARGET_64BIT
}
}
}
}
GenTree* srcLclVarTree = nullptr;
LclVarDsc* srcVarDsc = nullptr;
if (isCopyBlock)
{
if (src->OperGet() == GT_LCL_VAR)
{
srcLclVarTree = src;
srcVarDsc = lvaGetDesc(src->AsLclVarCommon());
}
else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree))
{
srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon());
}
if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
}
if (asgType != TYP_STRUCT)
{
noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType));
// For initBlk, a non-constant source is not going to allow us to fiddle
// with the bits to create a single assignment.
// Nor do we (for now) support transforming an InitBlock of SIMD type, unless
// it is a direct assignment to a lclVar and the value is zero.
if (isInitBlock)
{
if (!src->IsConstInitVal())
{
return nullptr;
}
if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr)))
{
return nullptr;
}
}
if (destVarDsc != nullptr)
{
// Kill everything about dest
if (optLocalAssertionProp)
{
if (optAssertionCount > 0)
{
fgKillDependentAssertions(destVarNum DEBUGARG(tree));
}
}
// A previous incarnation of this code also required the local not to be
// address-exposed (i.e. address-taken). That seems orthogonal to the decision of whether
// to do field-wise assignments: being address-exposed will cause it to be
// "dependently" promoted, so it will be in the right memory location. One possible
// further reason for avoiding field-wise stores is that the struct might have alignment-induced
// holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
// concern, then we could compromise, and say that being address-exposed, combined with fields that do
// not completely cover the memory of the struct, prevents field-wise assignments. The same situation
// exists for the "src" decision.
if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
return nullptr;
}
else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc)))
{
// Use the dest local var directly, as well as its type.
dest = destLclVarTree;
asgType = destVarDsc->lvType;
// If the block operation had been a write to a local var of a small int type,
// of the exact size of the small int type, and the var is NormalizeOnStore,
// we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
// have done that normalization. If we're now making it into an assignment,
// the NormalizeOnStore will work, and it can be a full def.
if (destVarDsc->lvNormalizeOnStore())
{
dest->gtFlags &= (~GTF_VAR_USEASG);
}
}
else
{
// Could be a non-promoted struct, or a floating point type local, or
// an int subject to a partial write. Don't enregister.
lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
// Mark the local var tree as a definition point of the local.
destLclVarTree->gtFlags |= GTF_VAR_DEF;
if (size < destVarDsc->lvExactSize)
{ // If it's not a full-width assignment....
destLclVarTree->gtFlags |= GTF_VAR_USEASG;
}
if (dest == destLclVarTree)
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
dest = gtNewIndir(asgType, addr);
}
}
}
// Check to ensure we don't have a reducible *(& ... )
if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR)
{
// If dest is an Indir or Block, and it has a child that is a Addr node
//
GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR
// Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'?
//
GenTree* destOp = addrNode->gtGetOp1();
var_types destOpType = destOp->TypeGet();
// We can if we have a primitive integer type and the sizes are exactly the same.
//
if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType))))
{
dest = destOp;
asgType = destOpType;
}
}
if (dest->gtEffectiveVal()->OperIsIndir())
{
// If we have no information about the destination, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
if (!fgIsIndirOfAddrOfLocal(dest))
{
dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
tree->gtFlags |= GTF_GLOB_REF;
}
dest->SetIndirExceptionFlags(this);
tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT);
}
if (isCopyBlock)
{
if (srcVarDsc != nullptr)
{
// Handled above.
assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted);
if (!varTypeIsFloating(srcLclVarTree->TypeGet()) &&
size == genTypeSize(genActualType(srcLclVarTree->TypeGet())))
{
// Use the src local var directly.
src = srcLclVarTree;
}
else
{
// The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar))
// or indir(lclVarAddr) so it must be on the stack.
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum();
lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
GenTree* srcAddr;
if (src == srcLclVarTree)
{
srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
src = gtNewOperNode(GT_IND, asgType, srcAddr);
}
else
{
assert(src->OperIsIndir());
}
}
}
if (src->OperIsIndir())
{
if (!fgIsIndirOfAddrOfLocal(src))
{
// If we have no information about the src, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
}
src->SetIndirExceptionFlags(this);
}
}
else // InitBlk
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(asgType))
{
assert(!isCopyBlock); // Else we would have returned the tree above.
noway_assert(src->IsIntegralConst(0));
noway_assert(destVarDsc != nullptr);
src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size);
}
else
#endif
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
assert(src->IsCnsIntOrI());
// This will mutate the integer constant, in place, to be the correct
// value for the type we are using in the assignment.
src->AsIntCon()->FixupInitBlkValue(asgType);
}
}
// Ensure that the dest is setup appropriately.
if (dest->gtEffectiveVal()->OperIsIndir())
{
dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/);
}
// Ensure that the rhs is setup appropriately.
if (isCopyBlock)
{
src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/);
}
// Set the lhs and rhs on the assignment.
if (dest != tree->AsOp()->gtOp1)
{
asg->AsOp()->gtOp1 = dest;
}
if (src != asg->AsOp()->gtOp2)
{
asg->AsOp()->gtOp2 = src;
}
asg->ChangeType(asgType);
dest->gtFlags |= GTF_DONT_CSE;
asg->gtFlags &= ~GTF_EXCEPT;
asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT);
// Un-set GTF_REVERSE_OPS, and it will be set later if appropriate.
asg->gtFlags &= ~GTF_REVERSE_OPS;
#ifdef DEBUG
if (verbose)
{
printf("fgMorphOneAsgBlock (after):\n");
gtDispTree(tree);
}
#endif
return tree;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree
// to a tree of promoted field initialization assignments.
//
// Arguments:
// destLclNode - The destination LclVar node
// initVal - The initialization value
//    blockSize - The number of bytes to initialize
//
// Return Value:
// A tree that performs field by field initialization of the destination
// struct variable if various conditions are met, nullptr otherwise.
//
// Notes:
// This transforms a single block initialization assignment like:
//
// * ASG struct (init)
// +--* BLK(12) struct
// | \--* ADDR long
// | \--* LCL_VAR struct(P) V02 loc0
// | \--* int V02.a (offs=0x00) -> V06 tmp3
// | \--* ubyte V02.c (offs=0x04) -> V07 tmp4
// | \--* float V02.d (offs=0x08) -> V08 tmp5
// \--* INIT_VAL int
// \--* CNS_INT int 42
//
// into a COMMA tree of assignments that initialize each promoted struct
// field:
//
// * COMMA void
// +--* COMMA void
// | +--* ASG int
// | | +--* LCL_VAR int V06 tmp3
// | | \--* CNS_INT int 0x2A2A2A2A
// | \--* ASG ubyte
// | +--* LCL_VAR ubyte V07 tmp4
// | \--* CNS_INT int 42
// \--* ASG float
// +--* LCL_VAR float V08 tmp5
// \--* CNS_DBL float 1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
assert(destLclNode->OperIs(GT_LCL_VAR));
LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
assert(varTypeIsStruct(destLclVar->TypeGet()));
assert(destLclVar->lvPromoted);
if (blockSize == 0)
{
JITDUMP(" size is zero or unknown.\n");
return nullptr;
}
if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
{
JITDUMP(" dest is address exposed and contains holes.\n");
return nullptr;
}
if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
{
// TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
JITDUMP(" dest has custom layout and contains holes.\n");
return nullptr;
}
if (destLclVar->lvExactSize != blockSize)
{
JITDUMP(" dest size mismatch.\n");
return nullptr;
}
if (!initVal->OperIs(GT_CNS_INT))
{
JITDUMP(" source is not constant.\n");
return nullptr;
}
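// Replicate the low byte of the init value across all 8 bytes; e.g. 42 (0x2A) yields
// 0x2A2A2A2A2A2A2A2A.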
const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;
if (initPattern != 0)
{
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);
if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
{
// Cannot initialize GC or SIMD types with a non-zero constant.
// The former is completely bogus. The latter restriction could be
// lifted by supporting non-zero SIMD constants or by generating
// field initialization code that converts an integer constant to
// the appropriate SIMD value. Unlikely to be very useful, though.
JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n");
return nullptr;
}
}
}
JITDUMP(" using field by field initialization.\n");
GenTree* tree = nullptr;
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
unsigned fieldLclNum = destLclVar->lvFieldLclStart + i;
LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum);
GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet());
// If it had been labeled a "USEASG", assignments to the individual promoted fields are not.
dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG));
GenTree* src;
switch (dest->TypeGet())
{
case TYP_BOOL:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
// Promoted fields are expected to be "normalize on load". If that changes then
// we may need to adjust this code to widen the constant correctly.
assert(fieldDesc->lvNormalizeOnLoad());
FALLTHROUGH;
case TYP_INT:
{
int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1;
src = gtNewIconNode(static_cast<int32_t>(initPattern & mask));
break;
}
case TYP_LONG:
src = gtNewLconNode(initPattern);
break;
case TYP_FLOAT:
float floatPattern;
memcpy(&floatPattern, &initPattern, sizeof(floatPattern));
src = gtNewDconNode(floatPattern, dest->TypeGet());
break;
case TYP_DOUBLE:
double doublePattern;
memcpy(&doublePattern, &initPattern, sizeof(doublePattern));
src = gtNewDconNode(doublePattern, dest->TypeGet());
break;
case TYP_REF:
case TYP_BYREF:
#ifdef FEATURE_SIMD
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
case TYP_SIMD32:
#endif // FEATURE_SIMD
assert(initPattern == 0);
src = gtNewIconNode(0, dest->TypeGet());
break;
default:
unreached();
}
GenTree* asg = gtNewAssignNode(dest, src);
if (optLocalAssertionProp)
{
optAssertionGen(asg);
}
if (tree != nullptr)
{
tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
}
else
{
tree = asg;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphGetStructAddr: Gets the address of a struct object
//
// Arguments:
// pTree - the parent's pointer to the struct object node
// clsHnd - the class handle for the struct type
// isRValue - true if this is a source (not dest)
//
// Return Value:
// Returns the address of the struct value, possibly modifying the existing tree to
// sink the address below any comma nodes (this is to canonicalize for value numbering).
//    If this is a source, it will morph it to a GT_IND before taking its address,
// since it may not be remorphed (and we don't want blk nodes as rvalues).
GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue)
{
GenTree* addr;
GenTree* tree = *pTree;
// If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we
// need to hang onto that for the purposes of value numbering.
if (tree->OperIsIndir())
{
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
addr = tree->AsOp()->gtOp1;
}
else
{
if (isRValue && tree->OperIsBlk())
{
tree->ChangeOper(GT_IND);
}
addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
}
}
else if (tree->gtOper == GT_COMMA)
{
// If this is a comma, we're going to "sink" the GT_ADDR below it.
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue);
tree->gtType = TYP_BYREF;
addr = tree;
}
else
{
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_INDEX:
case GT_FIELD:
case GT_ARR_ELEM:
addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
break;
case GT_INDEX_ADDR:
addr = tree;
break;
default:
{
// TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're
// not going to use "temp"
GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd);
unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum();
lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr));
addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue);
break;
}
}
}
*pTree = addr;
return addr;
}
//------------------------------------------------------------------------
// fgMorphBlockOperand: Canonicalize an operand of a block assignment
//
// Arguments:
// tree - The block operand
// asgType - The type of the assignment
// blockWidth - The size of the block
// isBlkReqd - true iff this operand must remain a block node
//
// Return Value:
// Returns the morphed block operand
//
// Notes:
// This does the following:
// - Ensures that a struct operand is a block node or lclVar.
// - Ensures that any COMMAs are above ADDR nodes.
// Although 'tree' WAS an operand of a block assignment, the assignment
// may have been retyped to be a scalar assignment.
GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd)
{
GenTree* effectiveVal = tree->gtEffectiveVal();
if (asgType != TYP_STRUCT)
{
if (effectiveVal->OperIsIndir())
{
if (!isBlkReqd)
{
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType))
{
effectiveVal = addr->gtGetOp1();
}
else if (effectiveVal->OperIsBlk())
{
effectiveVal->SetOper(GT_IND);
}
}
effectiveVal->gtType = asgType;
}
else if (effectiveVal->TypeGet() != asgType)
{
if (effectiveVal->IsCall())
{
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
else
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
effectiveVal = gtNewIndir(asgType, addr);
}
}
}
else
{
GenTreeIndir* indirTree = nullptr;
GenTreeLclVarCommon* lclNode = nullptr;
bool needsIndirection = true;
if (effectiveVal->OperIsIndir())
{
indirTree = effectiveVal->AsIndir();
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR))
{
lclNode = addr->gtGetOp1()->AsLclVarCommon();
}
}
else if (effectiveVal->OperGet() == GT_LCL_VAR)
{
lclNode = effectiveVal->AsLclVarCommon();
}
else if (effectiveVal->IsCall())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
#ifdef TARGET_ARM64
else if (effectiveVal->OperIsHWIntrinsic())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic();
assert(intrinsic->TypeGet() == TYP_STRUCT);
assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()));
#endif
}
#endif // TARGET_ARM64
if (lclNode != nullptr)
{
const LclVarDsc* varDsc = lvaGetDesc(lclNode);
if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType))
{
if (effectiveVal != lclNode)
{
JITDUMP("Replacing block node [%06d] with lclVar V%02u\n", dspTreeID(tree), lclNode->GetLclNum());
effectiveVal = lclNode;
}
needsIndirection = false;
}
else
{
// This may be a lclVar that was determined to be address-exposed.
effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT);
}
}
if (needsIndirection)
{
if (indirTree != nullptr)
{
// If we have an indirection and a block is required, it should already be a block.
assert(indirTree->OperIsBlk() || !isBlkReqd);
effectiveVal->gtType = asgType;
}
else
{
GenTree* newTree;
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
if (isBlkReqd)
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal);
if (clsHnd == NO_CLASS_HANDLE)
{
newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth));
}
else
{
newTree = gtNewObjNode(clsHnd, addr);
gtSetObjGcInfo(newTree->AsObj());
}
}
else
{
newTree = gtNewIndir(asgType, addr);
}
effectiveVal = newTree;
}
}
}
assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal)));
tree = effectiveVal;
return tree;
}
//------------------------------------------------------------------------
// fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields.
//
// Arguments:
// lclNum1 - a promoted lclVar that is used in fieldwise assignment;
// lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM.
//
// Return Value:
// True if the second local is valid and has the same struct handle as the first,
// false otherwise.
//
// Notes:
// This check is needed to avoid accessing LCL_VARs with incorrect
// CORINFO_FIELD_HANDLE that would confuse VN optimizations.
//
bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2)
{
assert(lclNum1 != BAD_VAR_NUM);
if (lclNum2 == BAD_VAR_NUM)
{
return false;
}
const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1);
const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2);
assert(varTypeIsStruct(varDsc1));
if (!varTypeIsStruct(varDsc2))
{
return false;
}
CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd();
CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd();
assert(struct1 != NO_CLASS_HANDLE);
assert(struct2 != NO_CLASS_HANDLE);
if (struct1 != struct2)
{
return false;
}
return true;
}
// Insert conversions and normalize the tree to make it amenable to register-based
// FP architectures.
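// For example, a compare of a float operand against a double operand gets the float side cast up to
// double.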
GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
{
if (tree->OperIsArithmetic())
{
if (varTypeIsFloating(tree))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet()));
if (op1->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet());
}
if (op2->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet());
}
}
}
else if (tree->OperIsCompare())
{
GenTree* op1 = tree->AsOp()->gtOp1;
if (varTypeIsFloating(op1))
{
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op2));
if (op1->TypeGet() != op2->TypeGet())
{
// both had better be floating, just one bigger than other
if (op1->TypeGet() == TYP_FLOAT)
{
assert(op2->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_FLOAT)
{
assert(op1->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
}
}
}
return tree;
}
#ifdef FEATURE_SIMD
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
//   Check whether the given field belongs to a SIMD struct. If it does, return the GenTree* for
//   the struct node, and also report the base type, field index and SIMD size. If it does not, return nullptr.
//   Usually, if the tree node comes from a SIMD lclvar that is not used in any SIMD intrinsic, we
//   should return nullptr, since in that case the SIMD struct should be treated as a regular struct.
//   However, if the SIMD struct node is wanted regardless, ignoreUsedInSIMDIntrinsic can be set to true.
//   Then there is no IsUsedInSIMDIntrinsic check, and the SIMD struct node is returned whenever
//   the struct is a SIMD struct.
//
// Arguments:
//   tree - GenTree*. This node is checked to see whether it is a field that belongs to a SIMD
//          struct used in a SIMD intrinsic.
//   simdBaseJitTypeOut - CorInfoType pointer; if the tree node is the tree we want, *simdBaseJitTypeOut is set
//                        to the SIMD lclvar's base JIT type.
//   indexOut - unsigned pointer; if the tree is used in a SIMD intrinsic, *indexOut is set
//              to the index number of this field.
//   simdSizeOut - unsigned pointer; if the tree is used in a SIMD intrinsic, *simdSizeOut is set
//                 to the size of the SIMD struct that this tree belongs to.
//   ignoreUsedInSIMDIntrinsic - bool. If this is set to true, the function skips
//                               the UsedInSIMDIntrinsic check.
//
// return value:
//   A GenTree* which points to the SIMD lclvar that the field belongs to. If the tree is not a
//   SIMD-intrinsic-related field, return nullptr.
//
GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic /*false*/)
{
GenTree* ret = nullptr;
if (tree->OperGet() == GT_FIELD)
{
GenTree* objRef = tree->AsField()->GetFldObj();
if (objRef != nullptr)
{
GenTree* obj = nullptr;
if (objRef->gtOper == GT_ADDR)
{
obj = objRef->AsOp()->gtOp1;
}
else if (ignoreUsedInSIMDIntrinsic)
{
obj = objRef;
}
else
{
return nullptr;
}
if (isSIMDTypeLocal(obj))
{
LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
{
*simdSizeOut = varDsc->lvExactSize;
*simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
ret = obj;
}
}
else if (obj->OperGet() == GT_SIMD)
{
ret = obj;
GenTreeSIMD* simdNode = obj->AsSIMD();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#ifdef FEATURE_HW_INTRINSICS
else if (obj->OperIsHWIntrinsic())
{
ret = obj;
GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#endif // FEATURE_HW_INTRINSICS
}
}
if (ret != nullptr)
{
var_types fieldType = tree->TypeGet();
if (fieldType == TYP_LONG)
{
// Vector2/3/4 expose public float fields while Vector<T>
// and Vector64/128/256<T> have internal ulong fields. So
// we should only ever encounter accesses for TYP_FLOAT or
// TYP_LONG and in the case of the latter we don't want the
// generic type since we are executing some algorithm on the
// raw underlying bits instead.
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG;
}
else
{
assert(fieldType == TYP_FLOAT);
}
unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
*indexOut = tree->AsField()->gtFldOffset / baseTypeSize;
}
return ret;
}
/*****************************************************************************
 * If a read operation tries to access a SIMD struct field, then transform the operation
 * to a SimdGetElementNode, and return the new tree. Otherwise, return the original tree.
 * Argument:
 *    tree - GenTree*. If this pointer points to a SIMD struct which is used in a SIMD
 *           intrinsic, we will morph it into the SIMD intrinsic NI_Vector128_GetElement.
 * Return:
 *    A GenTree* which points to the new tree. If the tree is not for a SIMD intrinsic,
 *    the original tree is returned.
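 * For example (illustrative), reading the second float field of a Vector4 local can become
 * GetElement(vec, 1).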
*/
GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
GenTree* op2 = gtNewIconNode(index, TYP_INT);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
return tree;
}
break;
}
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE2))
{
return tree;
}
break;
}
default:
{
unreached();
}
}
#elif defined(TARGET_ARM64)
if (!compOpportunisticallyDependsOn(InstructionSet_AdvSimd))
{
return tree;
}
#endif // !TARGET_XARCH && !TARGET_ARM64
tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
return tree;
}
/*****************************************************************************
* Transform an assignment of a SIMD struct field to SimdWithElementNode, and
* return a new tree. If it is not such an assignment, then return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic set.
* Return:
 * A GenTree* which points to the new tree. If the tree is not for a SIMD intrinsic,
 * the original tree is returned.
*/
GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
assert(tree->OperGet() == GT_ASG);
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdType = simdStructNode->gtType;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
GenTree* op2 = gtNewIconNode(index, TYP_INT);
GenTree* op3 = tree->gtGetOp2();
NamedIntrinsic intrinsicId = NI_Vector128_WithElement;
GenTree* target = gtClone(simdStructNode);
assert(target != nullptr);
GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
tree->AsOp()->gtOp1 = target;
tree->AsOp()->gtOp2 = simdTree;
// fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source
// and target have not yet been morphed.
// Therefore, in case the source and/or target are now implicit byrefs, we need to call it again.
if (fgMorphImplicitByRefArgs(tree))
{
if (tree->gtGetOp1()->OperIsBlk())
{
assert(tree->gtGetOp1()->TypeGet() == simdType);
tree->gtGetOp1()->SetOper(GT_IND);
tree->gtGetOp1()->gtType = simdType;
}
}
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
return tree;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------------
// fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3"
// for commutative operators.
//
// Arguments:
// tree - node to fold
//
// return value:
// A folded GenTree* instance or nullptr if something prevents folding.
//
GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree)
{
assert(varTypeIsIntegralOrI(tree->TypeGet()));
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR));
// op1 can be GT_COMMA, in this case we're going to fold
// "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))"
GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true);
genTreeOps oper = tree->OperGet();
if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() ||
op1->gtGetOp1()->IsCnsIntOrI())
{
return nullptr;
}
if (!fgGlobalMorph && (op1 != tree->gtGetOp1()))
{
// Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1)))
// don't run the optimization for such trees outside of global morph.
// Otherwise, there is a chance of violating VNs invariants and/or modifying a tree
// that is an active CSE candidate.
return nullptr;
}
if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1))
{
// The optimization removes 'tree' from IR and changes the value of 'op1'.
return nullptr;
}
if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow()))
{
return nullptr;
}
GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon();
GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon();
if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet()))
{
return nullptr;
}
if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2))
{
// The optimization removes 'cns2' from IR and changes the value of 'cns1'.
return nullptr;
}
GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2));
if (!folded->IsCnsIntOrI())
{
// Give up if we can't fold "C1 op C2"
return nullptr;
}
auto foldedCns = folded->AsIntCon();
cns1->SetIconValue(foldedCns->IconValue());
cns1->SetVNsFromNode(foldedCns);
cns1->gtFieldSeq = foldedCns->gtFieldSeq;
op1 = tree->gtGetOp1();
op1->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(cns2);
DEBUG_DESTROY_NODE(foldedCns);
INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1->AsOp();
}
//------------------------------------------------------------------------------
// fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)".
//
// Arguments:
// tree - node to fold
//
// Return Value:
// A folded GenTree* instance, or nullptr if it couldn't be folded
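//
// Notes:
//    For example (illustrative): "CAST<int>(a) & CAST<int>(b)", where both casts are from long,
//    becomes "CAST<int>(a & b)".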
GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree)
{
// This transform does not preserve VNs and deletes a node.
assert(fgGlobalMorph);
assert(varTypeIsIntegralOrI(tree));
assert(tree->OperIs(GT_OR, GT_AND, GT_XOR));
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
genTreeOps oper = tree->OperGet();
// see whether both ops are casts, with matching to and from types.
if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST))
{
// bail if either operand is a checked cast
if (op1->gtOverflow() || op2->gtOverflow())
{
return nullptr;
}
var_types fromType = op1->AsCast()->CastOp()->TypeGet();
var_types toType = op1->AsCast()->CastToType();
bool isUnsigned = op1->IsUnsigned();
if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) ||
(op2->IsUnsigned() != isUnsigned))
{
return nullptr;
}
/*
// Reuse gentree nodes:
//
// tree op1
// / \ |
// op1 op2 ==> tree
// | | / \.
// x y x y
//
// (op2 becomes garbage)
*/
tree->gtOp1 = op1->AsCast()->CastOp();
tree->gtOp2 = op2->AsCast()->CastOp();
tree->gtType = genActualType(fromType);
op1->gtType = genActualType(toType);
op1->AsCast()->gtOp1 = tree;
op1->AsCast()->CastToType() = toType;
op1->SetAllEffectsFlags(tree);
// no need to update isUnsigned
DEBUG_DESTROY_NODE(op2);
INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1;
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_SMPOP tree for code generation.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
{
ALLOCA_CHECK();
assert(tree->OperKind() & GTK_SMPOP);
/* The steps in this function are :
o Perform required preorder processing
o Process the first, then second operand, if any
o Perform required postorder morphing
o Perform optional postorder morphing if optimizing
*/
bool isQmarkColon = false;
AssertionIndex origAssertionCount = DUMMY_INIT(0);
AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
AssertionIndex thenAssertionCount = DUMMY_INIT(0);
AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
if (fgGlobalMorph)
{
tree = fgMorphForRegisterFP(tree);
}
genTreeOps oper = tree->OperGet();
var_types typ = tree->TypeGet();
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
/*-------------------------------------------------------------------------
* First do any PRE-ORDER processing
*/
switch (oper)
{
// Some arithmetic operators need to use a helper call to the EE
int helper;
case GT_ASG:
tree = fgDoNormalizeOnStore(tree);
/* fgDoNormalizeOnStore can change op2 */
noway_assert(op1 == tree->AsOp()->gtOp1);
op2 = tree->AsOp()->gtOp2;
#ifdef FEATURE_SIMD
if (IsBaselineSimdIsaSupported())
{
// We should check whether op2 should be assigned to a SIMD field or not.
// If it is, we should translate the tree to a SIMD intrinsic.
assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
typ = tree->TypeGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
#ifdef DEBUG
assert((tree == newTree) && (tree->OperGet() == oper));
if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
{
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
}
#endif // DEBUG
}
#endif
// We can't CSE the LHS of an assignment. Only r-values can be CSEed.
// Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
// behavior, allow CSE'ing if it is a struct type (or a TYP_REF transformed from a struct type)
// TODO-1stClassStructs: improve this.
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_ADDR:
/* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
op1->gtFlags |= GTF_DONT_CSE;
break;
case GT_QMARK:
case GT_JTRUE:
noway_assert(op1);
if (op1->OperIsCompare())
{
/* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
not need to materialize the result as a 0 or 1. */
/* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
else
{
GenTree* effOp1 = op1->gtEffectiveVal();
noway_assert((effOp1->gtOper == GT_CNS_INT) &&
(effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
}
break;
case GT_COLON:
if (optLocalAssertionProp)
{
isQmarkColon = true;
}
break;
case GT_FIELD:
return fgMorphField(tree, mac);
case GT_INDEX:
return fgMorphArrayIndex(tree);
case GT_CAST:
{
GenTree* morphedCast = fgMorphExpandCast(tree->AsCast());
if (morphedCast != nullptr)
{
return morphedCast;
}
op1 = tree->AsCast()->CastOp();
}
break;
case GT_MUL:
noway_assert(op2 != nullptr);
if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow())
{
// MUL(NEG(a), C) => MUL(a, NEG(C))
if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() &&
!op2->IsIconHandle())
{
GenTree* newOp1 = op1->gtGetOp1();
GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet());
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
tree->AsOp()->gtOp1 = newOp1;
tree->AsOp()->gtOp2 = newConst;
return fgMorphSmpOp(tree, mac);
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// For (long)int1 * (long)int2, we don't actually do the
// casts, and just multiply the 32 bit values, which will
// give us the 64 bit result in edx:eax.
if (tree->Is64RsltMul())
{
// We are seeing this node again.
// Morph only the children of casts,
// so as to avoid losing them.
tree = fgMorphLongMul(tree->AsOp());
goto DONE_MORPHING_CHILDREN;
}
tree = fgRecognizeAndMorphLongMul(tree->AsOp());
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->AsOp()->gtGetOp2();
if (tree->Is64RsltMul())
{
goto DONE_MORPHING_CHILDREN;
}
else
{
if (tree->gtOverflow())
helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
else
helper = CORINFO_HELP_LMUL;
goto USE_HELPER_FOR_ARITH;
}
}
#endif // !TARGET_64BIT
break;
case GT_ARR_LENGTH:
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
if (iconNode != nullptr)
{
INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return iconNode;
}
}
break;
case GT_DIV:
// Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two.
// Powers of two within range are always exactly represented,
// so multiplication by the reciprocal is safe in this scenario
if (fgGlobalMorph && op2->IsCnsFltOrDbl())
{
double divisor = op2->AsDblCon()->gtDconVal;
if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
{
oper = GT_MUL;
tree->ChangeOper(oper);
op2->AsDblCon()->gtDconVal = 1.0 / divisor;
}
}
// Convert DIV to UDIV if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_DIV));
tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_LDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_DIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // !TARGET_64BIT
break;
case GT_UDIV:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_ULDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_UDIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // TARGET_64BIT
break;
case GT_MOD:
if (varTypeIsFloating(typ))
{
helper = CORINFO_HELP_DBLREM;
noway_assert(op2);
if (op1->TypeGet() == TYP_FLOAT)
{
if (op2->TypeGet() == TYP_FLOAT)
{
helper = CORINFO_HELP_FLTREM;
}
else
{
tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
else if (op2->TypeGet() == TYP_FLOAT)
{
tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
goto USE_HELPER_FOR_ARITH;
}
// Convert MOD to UMOD if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_MOD));
tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
// Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
// A similar optimization for signed mod will not work for a negative perfectly divisible
// HI-word. To make it correct, we would need to divide without the sign and then flip the
// result sign after mod. This requires 18 opcodes + flow, making it not worth inlining.
goto ASSIGN_HELPER_FOR_MOD;
case GT_UMOD:
#ifdef TARGET_ARMARCH
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
#else // TARGET_XARCH
// If this is an unsigned long mod with a constant divisor,
// then don't morph to a helper call - it can be done faster inline using idiv.
noway_assert(op2);
if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
{
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
noway_assert(op1->TypeIs(TYP_LONG));
// Update flags for op1 morph.
tree->gtFlags &= ~GTF_ALL_EFFECT;
// Only update with op1 as op2 is a constant.
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// If op1 is a constant, then do constant folding of the division operator.
if (op1->OperIs(GT_CNS_NATIVELONG))
{
tree = gtFoldExpr(tree);
}
if (!tree->OperIsConst())
{
tree->AsOp()->CheckDivideByConstOptimized(this);
}
return tree;
}
}
#endif // TARGET_XARCH
ASSIGN_HELPER_FOR_MOD:
// For "val % 1", return 0 if op1 doesn't have any side effects
// and we are not in the CSE phase; in the CSE phase we cannot discard 'tree'
// because it may contain CSE expressions that we haven't yet examined.
//
if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
{
if (op2->IsIntegralConst(1))
{
GenTree* zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
DEBUG_DESTROY_NODE(tree);
return zeroNode;
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
if (oper == GT_UMOD)
{
helper = CORINFO_HELP_UMOD;
goto USE_HELPER_FOR_ARITH;
}
else if (oper == GT_MOD)
{
helper = CORINFO_HELP_MOD;
goto USE_HELPER_FOR_ARITH;
}
}
#endif
#endif // !TARGET_64BIT
if (!optValnumCSE_phase)
{
#ifdef TARGET_ARM64
if (tree->OperIs(GT_UMOD) && op2->IsIntegralConstUnsignedPow2())
{
// Transformation: a % b = a & (b - 1);
tree = fgMorphUModToAndSub(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
// ARM64 architecture manual suggests this transformation
// for the mod operator.
else
#else
// XARCH only applies this transformation if we know
// that magic division will be used - which is determined
// when 'b' is not a power of 2 constant and mod operator is signed.
// Lowering for XARCH does this optimization already,
// but it is also done here to take advantage of CSE.
if (tree->OperIs(GT_MOD) && op2->IsIntegralConst() && !op2->IsIntegralConstAbsPow2())
#endif
{
// Transformation: a % b = a - (a / b) * b;
tree = fgMorphModToSubMulDiv(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
}
break;
USE_HELPER_FOR_ARITH:
{
// TODO: this comment is wrong now, do an appropriate fix.
/* We have to morph these arithmetic operations into helper calls
before morphing the arguments (preorder), else the arguments
won't get correct values of fgPtrArgCntCur.
However, try to fold the tree first in case we end up with a
simple node which won't need a helper call at all */
noway_assert(tree->OperIsBinary());
GenTree* oldTree = tree;
tree = gtFoldExpr(tree);
// Were we able to fold it ?
// Note that gtFoldExpr may return a non-leaf even if successful
// e.g. for something like "expr / 1" - see also bug #290853
if (tree->OperIsLeaf() || (oldTree != tree))
{
return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
}
// Did we fold it into a comma node with throw?
if (tree->gtOper == GT_COMMA)
{
noway_assert(fgIsCommaThrow(tree));
return fgMorphTree(tree);
}
}
return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2));
case GT_RETURN:
if (!tree->TypeIs(TYP_VOID))
{
if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND))
{
op1 = fgMorphRetInd(tree->AsUnOp());
}
if (op1->OperIs(GT_LCL_VAR))
{
// With a `genReturnBB` this `RETURN(src)` tree will be replaced by an `ASG(genReturnLocal, src)`
// and the `ASG` will be transformed into a field-by-field copy without referencing the parent local,
// if possible.
GenTreeLclVar* lclVar = op1->AsLclVar();
unsigned lclNum = lclVar->GetLclNum();
if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum))
{
LclVarDsc* varDsc = lvaGetDesc(lclVar);
if (varDsc->CanBeReplacedWithItsField(this))
{
// We can replace the struct with its only field and allow copy propagation to replace
// return value that was written as a field.
unsigned fieldLclNum = varDsc->lvFieldLclStart;
LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum);
JITDUMP("Replacing an independently promoted local var V%02u with its only field "
"V%02u for "
"the return [%06u]\n",
lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree));
lclVar->SetLclNum(fieldLclNum);
lclVar->ChangeType(fieldDsc->lvType);
}
}
}
}
// normalize small integer return values
if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) &&
fgCastNeeded(op1, info.compRetType))
{
// Small-typed return values are normalized by the callee
op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType);
// Propagate GTF_COLON_COND
op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
tree->AsOp()->gtOp1 = fgMorphTree(op1);
// Propagate side effect flags
tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1());
return tree;
}
break;
case GT_EQ:
case GT_NE:
{
GenTree* optimizedTree = gtFoldTypeCompare(tree);
if (optimizedTree != tree)
{
return fgMorphTree(optimizedTree);
}
// Pattern-matching optimization:
// (a % c) ==/!= 0
// for power-of-2 constant `c`
// =>
// a & (c - 1) ==/!= 0
// For integer `a`, even if negative.
if (opts.OptimizationEnabled() && !optValnumCSE_phase)
{
assert(tree->OperIs(GT_EQ, GT_NE));
if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0))
{
GenTree* op1op2 = op1->AsOp()->gtOp2;
if (op1op2->IsCnsIntOrI())
{
const ssize_t modValue = op1op2->AsIntCon()->IconValue();
if (isPow2(modValue))
{
JITDUMP("\nTransforming:\n");
DISPTREE(tree);
op1->SetOper(GT_AND); // Change % => &
op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1
fgUpdateConstTreeValueNumber(op1op2);
JITDUMP("\ninto:\n");
DISPTREE(tree);
}
}
}
}
}
FALLTHROUGH;
case GT_GT:
{
// Try and optimize nullable boxes feeding compares
GenTree* optimizedTree = gtFoldBoxNullable(tree);
if (optimizedTree->OperGet() != tree->OperGet())
{
return optimizedTree;
}
else
{
tree = optimizedTree;
}
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
break;
}
case GT_RUNTIMELOOKUP:
return fgMorphTree(op1);
#ifdef TARGET_ARM
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round)
{
switch (tree->TypeGet())
{
case TYP_DOUBLE:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1));
case TYP_FLOAT:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1));
default:
unreached();
}
}
break;
#endif
case GT_PUTARG_TYPE:
return fgMorphTree(tree->AsUnOp()->gtGetOp1());
case GT_NULLCHECK:
{
op1 = tree->AsUnOp()->gtGetOp1();
if (op1->IsCall())
{
GenTreeCall* const call = op1->AsCall();
if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd)))
{
JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call));
// TODO: Can we also remove the call?
//
return fgMorphTree(call);
}
}
}
break;
default:
break;
}
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
GenTree* morphed = fgMorphReduceAddOps(tree);
if (morphed != tree)
return fgMorphTree(morphed);
}
/*-------------------------------------------------------------------------
* Process the first operand, if any
*/
if (op1)
{
// If we are entering the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can restore this state when entering the "else" part
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
origAssertionCount = optAssertionCount;
memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
origAssertionCount = 0;
origAssertionTab = nullptr;
}
}
// We might need a new MorphAddressContext context. (These are used to convey
// parent context about how addresses being calculated will be used; see the
// specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
MorphAddrContext subIndMac1(MACK_Ind);
MorphAddrContext* subMac1 = mac;
if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind)
{
switch (tree->gtOper)
{
case GT_ADDR:
// A non-null mac here implies this node is part of an address computation.
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
subMac1->m_kind = MACK_Addr;
}
break;
case GT_COMMA:
// In a comma, the incoming context only applies to the rightmost arg of the
// comma list. The left arg (op1) gets a fresh context.
subMac1 = nullptr;
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
// A non-null mac here implies this node is part of an address computation (the tree parent is
// GT_ADDR).
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
}
break;
default:
break;
}
}
// For additions, if we're in an IND context keep track of whether
// all offsets added to the address are constant, and their sum.
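// For example, for an address computed as ADD(ADD(addr, 4), 8) the context ends up
// with a total constant offset of 12 and m_allConstantOffsets still true; any
// non-constant offset clears m_allConstantOffsets instead. (Consumers of the context,
// e.g. field morphing, can use this when deciding whether an explicit null check is
// needed - see the MorphAddrContext specification comment.)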
if (tree->gtOper == GT_ADD && subMac1 != nullptr)
{
assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
GenTree* otherOp = tree->AsOp()->gtOp2;
// Is the other operand a constant?
if (otherOp->IsCnsIntOrI())
{
ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
totalOffset += otherOp->AsIntConCommon()->IconValue();
if (totalOffset.IsOverflow())
{
// We will consider an offset so large as to overflow as "not a constant" --
// we will do a null check.
subMac1->m_allConstantOffsets = false;
}
else
{
subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
}
else
{
subMac1->m_allConstantOffsets = false;
}
}
// If op1 is a GT_FIELD or indir, we need to pass down the mac if
// its parent is GT_ADDR, since the address of op1
// is part of an ongoing address computation. Otherwise
// op1 represents the value of the field and so any address
// calculations it does are in a new context.
if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR))
{
subMac1 = nullptr;
// The impact of op1's value to any ongoing
// address computation is handled below when looking
// at op2.
}
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1);
// If we are exiting the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can merge this state with the "else" part exit
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
thenAssertionCount = optAssertionCount;
memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
thenAssertionCount = 0;
thenAssertionTab = nullptr;
}
}
/* Morphing along with folding and inlining may have changed the
* side effect flags, so we have to reset them
*
* NOTE: Don't reset the exception flags on nodes that may throw */
assert(tree->gtOper != GT_CALL);
if (!tree->OperRequiresCallFlag(this))
{
tree->gtFlags &= ~GTF_CALL;
}
/* Propagate the new flags */
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
// Similarly for clsVar
if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
{
tree->gtFlags &= ~GTF_GLOB_REF;
}
} // if (op1)
/*-------------------------------------------------------------------------
* Process the second operand, if any
*/
if (op2)
{
// If we are entering the "else" part of a Qmark-Colon we must
// reset the state of the current copy assignment table
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
optAssertionReset(0);
if (origAssertionCount)
{
size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
optAssertionReset(origAssertionCount);
}
}
// We might need a new MorphAddrContext to use in evaluating op2.
// (These are used to convey parent context about how addresses being calculated
// will be used; see the specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
switch (tree->gtOper)
{
case GT_ADD:
if (mac != nullptr && mac->m_kind == MACK_Ind)
{
GenTree* otherOp = tree->AsOp()->gtOp1;
// Is the other operand a constant?
if (otherOp->IsCnsIntOrI())
{
mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
else
{
mac->m_allConstantOffsets = false;
}
}
break;
default:
break;
}
// If op2 is a GT_FIELD or indir, we must be taking its value,
// so it should evaluate its address in a new context.
if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir())
{
// The impact of op2's value to any ongoing
// address computation is handled above when looking
// at op1.
mac = nullptr;
}
tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac);
/* Propagate the side effect flags from op2 */
tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
// If we are exiting the "else" part of a Qmark-Colon we must
// merge the state of the current copy assignment table with
// that of the exit of the "then" part.
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
// If either exit table has zero entries then
// the merged table also has zero entries
if (optAssertionCount == 0 || thenAssertionCount == 0)
{
optAssertionReset(0);
}
else
{
size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
if ((optAssertionCount != thenAssertionCount) ||
(memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
{
// Yes they are different so we have to find the merged set
// Iterate over the copy asgn table removing any entries
// that do not have an exact match in the thenAssertionTab
AssertionIndex index = 1;
while (index <= optAssertionCount)
{
AssertionDsc* curAssertion = optGetAssertion(index);
for (unsigned j = 0; j < thenAssertionCount; j++)
{
AssertionDsc* thenAssertion = &thenAssertionTab[j];
// Do the left sides match?
if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
(curAssertion->assertionKind == thenAssertion->assertionKind))
{
// Do the right sides match?
if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
(curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
{
goto KEEP;
}
else
{
goto REMOVE;
}
}
}
//
// If we fall out of the loop above then we didn't find
// any matching entry in the thenAssertionTab so it must
// have been killed on that path so we remove it here
//
REMOVE:
// The data at optAssertionTabPrivate[i] is to be removed
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
printf("The QMARK-COLON ");
printTreeID(tree);
printf(" removes assertion candidate #%d\n", index);
}
#endif
optAssertionRemove(index);
continue;
KEEP:
// The data at optAssertionTabPrivate[i] is to be kept
index++;
}
}
}
}
} // if (op2)
#ifndef TARGET_64BIT
DONE_MORPHING_CHILDREN:
#endif // !TARGET_64BIT
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
else
{
if (tree->OperMayThrow(this))
{
// Mark the tree node as potentially throwing an exception
tree->gtFlags |= GTF_EXCEPT;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
{
tree->gtFlags &= ~GTF_EXCEPT;
}
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
{
tree->gtFlags &= ~GTF_ASG;
}
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0)))
{
tree->gtFlags &= ~GTF_CALL;
}
}
/*-------------------------------------------------------------------------
* Now do POST-ORDER processing
*/
if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
{
// The tree is really not GC but was marked as such. Now that the
// children have been unmarked, unmark the tree too.
// Remember that GT_COMMA inherits its type only from op2
if (tree->gtOper == GT_COMMA)
{
tree->gtType = genActualType(op2->TypeGet());
}
else
{
tree->gtType = genActualType(op1->TypeGet());
}
}
GenTree* oldTree = tree;
GenTree* qmarkOp1 = nullptr;
GenTree* qmarkOp2 = nullptr;
if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON))
{
qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1;
qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
}
// Try to fold it, maybe we get lucky,
tree = gtFoldExpr(tree);
if (oldTree != tree)
{
/* if gtFoldExpr returned op1 or op2 then we are done */
if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
{
return tree;
}
/* If we created a comma-throw tree then we need to morph op1 */
if (fgIsCommaThrow(tree))
{
tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
else if (tree->OperIsConst())
{
return tree;
}
/* gtFoldExpr could have used setOper to change the oper */
oper = tree->OperGet();
typ = tree->TypeGet();
/* gtFoldExpr could have changed op1 and op2 */
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
// Do we have an integer compare operation?
//
if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
{
// Are we comparing against zero?
//
if (op2->IsIntegralConst(0))
{
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
}
/*-------------------------------------------------------------------------
* Perform the required oper-specific postorder morphing
*/
GenTree* temp;
size_t ival1;
GenTree* lclVarTree;
GenTree* effectiveOp1;
FieldSeqNode* fieldSeq = nullptr;
switch (oper)
{
case GT_ASG:
if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0))
{
op1->gtFlags &= ~GTF_VAR_FOLDED_IND;
tree = fgDoNormalizeOnStore(tree);
op2 = tree->gtGetOp2();
}
lclVarTree = fgIsIndirOfAddrOfLocal(op1);
if (lclVarTree != nullptr)
{
lclVarTree->gtFlags |= GTF_VAR_DEF;
}
effectiveOp1 = op1->gtEffectiveVal();
// If we are storing a small type, we might be able to omit a cast.
if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1))
{
if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) &&
varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow())
{
var_types castType = op2->CastToType();
// If we are performing a narrowing cast and
// castType is larger or the same as op1's type
// then we can discard the cast.
if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1)))
{
tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp();
}
}
}
fgAssignSetVarDef(tree);
/* We can't CSE the LHS of an assignment */
/* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_CAST:
tree = fgOptimizeCast(tree->AsCast());
if (!tree->OperIsSimple())
{
return tree;
}
if (tree->OperIs(GT_CAST) && tree->gtOverflow())
{
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_EQ:
case GT_NE:
// It is not safe to reorder/delete CSE's
if (!optValnumCSE_phase && op2->IsIntegralConst())
{
tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp());
assert(tree->OperIsCompare());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
goto COMPARE;
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)))
{
tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
// op2's value may be changed, so it cannot be a CSE candidate.
if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2))
{
tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp());
oper = tree->OperGet();
assert(op1 == tree->AsOp()->gtGetOp1());
assert(op2 == tree->AsOp()->gtGetOp2());
}
COMPARE:
noway_assert(tree->OperIsCompare());
break;
case GT_MUL:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// This must be GTF_MUL_64RSLT
INDEBUG(tree->AsOp()->DebugCheckLongMul());
return tree;
}
#endif // TARGET_64BIT
goto CM_OVF_OP;
case GT_SUB:
if (tree->gtOverflow())
{
goto CM_OVF_OP;
}
// TODO #4104: there are a lot of other places where
// this condition is not checked before transformations.
if (fgGlobalMorph)
{
/* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
noway_assert(op2);
if (op2->IsCnsIntOrI() && !op2->IsIconHandle())
{
// Negate the constant and change the node to be "+",
// except when `op2` is a const byref.
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
noway_assert(op1);
if (op1->IsCnsIntOrI())
{
noway_assert(varTypeIsIntOrI(tree));
// The type of the new GT_NEG node cannot just be op2->TypeGet().
// Otherwise we may sign-extend incorrectly in cases where the GT_NEG
// node ends up feeding directly into a cast, for example in
// GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte))
tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2);
fgMorphTreeDone(op2);
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* No match - exit */
}
// Skip the optimization if the non-NEG operand is constant.
// Note that neither op1 nor op2 is constant here because that was already checked above.
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
// a - -b = > a + b
// SUB(a, (NEG(b)) => ADD(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
// tree: SUB
// op1: a
// op2: NEG
// op2Child: b
GenTree* op2Child = op2->AsOp()->gtOp1; // b
oper = GT_ADD;
tree->SetOper(oper, GenTree::PRESERVE_VN);
tree->AsOp()->gtOp2 = op2Child;
DEBUG_DESTROY_NODE(op2);
op2 = op2Child;
}
// -a - -b = > b - a
// SUB(NEG(a), (NEG(b)) => SUB(b, a)
else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2))
{
// tree: SUB
// op1: NEG
// op1Child: a
// op2: NEG
// op2Child: b
GenTree* op1Child = op1->AsOp()->gtOp1; // a
GenTree* op2Child = op2->AsOp()->gtOp1; // b
tree->AsOp()->gtOp1 = op2Child;
tree->AsOp()->gtOp2 = op1Child;
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
op1 = op2Child;
op2 = op1Child;
}
}
break;
#ifdef TARGET_ARM64
case GT_DIV:
if (!varTypeIsFloating(tree->gtType))
{
// Codegen for this instruction needs to be able to throw two exceptions:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
}
break;
case GT_UDIV:
// Codegen for this instruction needs to be able to throw one exception:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
break;
#endif
case GT_ADD:
CM_OVF_OP:
if (tree->gtOverflow())
{
tree->gtRequestSetFlags();
// Add the exception-throwing basic block to jump to on overflow
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
// We can't do any commutative morphing for overflow instructions
break;
}
CM_ADD_OP:
FALLTHROUGH;
case GT_OR:
case GT_XOR:
case GT_AND:
tree = fgOptimizeCommutativeArithmetic(tree->AsOp());
if (!tree->OperIsSimple())
{
return tree;
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_NOT:
case GT_NEG:
// Remove double negation/not.
// Note: this is not a safe transformation if "tree" is a CSE candidate.
// Consider for example the following expression: NEG(NEG(OP)), where any
// NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find
// the original NEG in the statement.
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) &&
!gtIsActiveCSE_Candidate(op1))
{
JITDUMP("Remove double negation/not\n")
GenTree* op1op1 = op1->gtGetOp1();
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op1op1;
}
// Distribute negation over simple multiplication/division expressions
if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) &&
op1->OperIs(GT_MUL, GT_DIV))
{
GenTreeOp* mulOrDiv = op1->AsOp();
GenTree* op1op1 = mulOrDiv->gtGetOp1();
GenTree* op1op2 = mulOrDiv->gtGetOp2();
if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle())
{
// NEG(MUL(a, C)) => MUL(a, -C)
// NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1}
ssize_t constVal = op1op2->AsIntCon()->IconValue();
if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) ||
(mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow()))
{
GenTree* newOp1 = op1op1; // a
GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C
mulOrDiv->gtOp1 = newOp1;
mulOrDiv->gtOp2 = newOp2;
mulOrDiv->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1op2);
return mulOrDiv;
}
}
}
/* Any constant cases should have been folded earlier */
noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
break;
case GT_CKFINITE:
noway_assert(varTypeIsFloating(op1->TypeGet()));
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN);
break;
case GT_BOUNDS_CHECK:
fgSetRngChkTarget(tree);
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
{
// If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
// the indirection. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
// is a local or CLS_VAR, even if it has been address-exposed.
if (op1->OperIs(GT_ADDR))
{
tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF);
}
if (!tree->OperIs(GT_IND))
{
break;
}
// Can not remove a GT_IND if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
bool foldAndReturnTemp = false;
temp = nullptr;
ival1 = 0;
// Don't remove a volatile GT_IND, even if the address points to a local variable.
if ((tree->gtFlags & GTF_IND_VOLATILE) == 0)
{
/* Try to Fold *(&X) into X */
if (op1->gtOper == GT_ADDR)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
temp = op1->AsOp()->gtOp1; // X
// In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
// they are the *same* struct type. In fact, they almost certainly aren't. If the
// address has an associated field sequence, that identifies this case; go through
// the "lcl_fld" path rather than this one.
FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below.
if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
{
foldAndReturnTemp = true;
}
else if (temp->OperIsLocal())
{
unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset
if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
{
noway_assert(varTypeIsStruct(varDsc));
// We will try to optimize when we have a single field struct that is being struct promoted
if (varDsc->lvFieldCnt == 1)
{
unsigned lclNumFld = varDsc->lvFieldLclStart;
// just grab the promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);
// Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
// is zero
if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
{
// We can just use the existing promoted field LclNum
temp->AsLclVarCommon()->SetLclNum(lclNumFld);
temp->gtType = fieldVarDsc->TypeGet();
foldAndReturnTemp = true;
}
}
}
// If the type of the IND (typ) is a "small int", and the type of the local has the
// same width, then we can reduce to just the local variable -- it will be
// correctly normalized.
//
// The below transformation cannot be applied if the local var needs to be normalized on load.
else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
const bool possiblyStore = !definitelyLoad;
if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
{
typ = temp->TypeGet();
tree->gtType = typ;
foldAndReturnTemp = true;
if (possiblyStore)
{
// This node can be on the left-hand-side of an assignment node.
// Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
// is called on its parent in post-order morph.
temp->gtFlags |= GTF_VAR_FOLDED_IND;
}
}
}
// For matching types we can fold
else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
tree->gtType = typ = temp->TypeGet();
foldAndReturnTemp = true;
}
else
{
// Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
// nullptr)
assert(fieldSeq == nullptr);
bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
assert(b || fieldSeq == nullptr);
if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
{
// Append the field sequence, change the type.
temp->AsLclFld()->SetFieldSeq(
GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
temp->gtType = typ;
foldAndReturnTemp = true;
}
}
// Otherwise we will fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
else // !temp->OperIsLocal()
{
// We don't try to fold away the GT_IND/GT_ADDR for this case
temp = nullptr;
}
}
else if (op1->OperGet() == GT_ADD)
{
#ifdef TARGET_ARM
// Check for a misaligned floating point indirection.
if (varTypeIsFloating(typ))
{
GenTree* addOp2 = op1->AsOp()->gtGetOp2();
if (addOp2->IsCnsIntOrI())
{
ssize_t offset = addOp2->AsIntCon()->gtIconVal;
if ((offset % emitTypeSize(TYP_FLOAT)) != 0)
{
tree->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
#endif // TARGET_ARM
/* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
opts.OptimizationEnabled())
{
// No overflow arithmetic with pointers
noway_assert(!op1->gtOverflow());
temp = op1->AsOp()->gtOp1->AsOp()->gtOp1;
if (!temp->OperIsLocal())
{
temp = nullptr;
break;
}
// Can not remove the GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1))
{
break;
}
ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
// Does the address have an associated zero-offset field sequence?
FieldSeqNode* addrFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq))
{
fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
}
if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
{
noway_assert(!varTypeIsGC(temp->TypeGet()));
foldAndReturnTemp = true;
}
else
{
// The emitter can't handle large offsets
if (ival1 != (unsigned short)ival1)
{
break;
}
// The emitter can get confused by invalid offsets
if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum()))
{
break;
}
}
// Now we can fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
}
}
// At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
// - We may have a load of a local where the load has a different type than the local
// - We may have a load of a local plus an offset
//
// In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
// offset if doing so is legal. The only cases in which this transformation is illegal are if the load
// begins before the local or if the load extends beyond the end of the local (i.e. if the load is
// out-of-bounds w.r.t. the local).
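// For example, a TYP_INT load at offset 8 from a 16-byte struct local can be turned
// into a GT_LCL_FLD of type int at offset 8, while a TYP_LONG load at offset 12 is
// rejected below because it would read past the end of the local.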
if ((temp != nullptr) && !foldAndReturnTemp)
{
assert(temp->OperIsLocal());
const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lclNum);
const var_types tempTyp = temp->TypeGet();
const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK);
const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp);
// Make sure we do not enregister this lclVar.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
// If the size of the load is greater than the size of the lclVar, we cannot fold this access into
// a lclFld: the access represented by an lclFld node must begin at or after the start of the
// lclVar and must not extend beyond the end of the lclVar.
if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize))
{
GenTreeLclFld* lclFld;
// We will turn a GT_LCL_VAR into a GT_LCL_FLD with a gtLclOffs of 'ival1',
// or if we already have a GT_LCL_FLD we will adjust its gtLclOffs by adding 'ival1'.
// Then we change the type of the GT_LCL_FLD to match the original GT_IND type.
//
if (temp->OperGet() == GT_LCL_FLD)
{
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1));
lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq));
}
else // We have a GT_LCL_VAR.
{
assert(temp->OperGet() == GT_LCL_VAR);
temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField".
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(static_cast<unsigned>(ival1));
if (fieldSeq != nullptr)
{
// If it does represent a field, note that.
lclFld->SetFieldSeq(fieldSeq);
}
}
temp->gtType = tree->gtType;
foldAndReturnTemp = true;
}
}
if (foldAndReturnTemp)
{
assert(temp != nullptr);
assert(temp->TypeGet() == typ);
assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR));
// Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for
// 'temp' because a GT_ADDR always marks it for its operand.
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
if (op1->OperGet() == GT_ADD)
{
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT
}
DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
DEBUG_DESTROY_NODE(tree); // GT_IND
// If the result of the fold is a local var, we may need to perform further adjustments e.g. for
// normalization.
if (temp->OperIs(GT_LCL_VAR))
{
#ifdef DEBUG
// We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear
// and the node in question must have this bit set (as it has already been morphed).
temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
const bool forceRemorph = true;
temp = fgMorphLocalVar(temp, forceRemorph);
#ifdef DEBUG
// We then set this flag on `temp` because `fgMorphLocalVar` may not set it itself, and the
// caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function
// returns.
temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return temp;
}
// Only do this optimization when we are in the global optimizer. Doing this after value numbering
// could result in an invalid value number for the newly generated GT_IND node.
if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
{
// Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
// TBD: this transformation is currently necessary for correctness -- it might
// be good to analyze the failures that result if we don't do this, and fix them
// in other ways. Ideally, this should be optional.
GenTree* commaNode = op1;
GenTreeFlags treeFlags = tree->gtFlags;
commaNode->gtType = typ;
commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
// dangerous; clear at least
// GTF_REVERSE_OPS.
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA)
{
commaNode = commaNode->AsOp()->gtOp2;
commaNode->gtType = typ;
commaNode->gtFlags =
(treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is
// dangerous; clear at least GTF_REVERSE_OPS, GTF_ASG, and
// GTF_CALL.
commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) &
(GTF_ASG | GTF_CALL));
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0;
ArrayInfo arrInfo;
if (wasArrIndex)
{
bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
assert(b);
GetArrayInfoMap()->Remove(tree);
}
tree = op1;
GenTree* addr = commaNode->AsOp()->gtOp2;
// TODO-1stClassStructs: we often create a struct IND without a handle, fix it.
op1 = gtNewIndir(typ, addr);
// This is very conservative
op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING;
op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
if (wasArrIndex)
{
GetArrayInfoMap()->Set(op1, arrInfo);
}
#ifdef DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
commaNode->AsOp()->gtOp2 = op1;
commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
return tree;
}
break;
}
case GT_ADDR:
// Can not remove op1 if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
if (op1->OperGet() == GT_IND)
{
if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(IND(...)) == (...).
GenTree* addr = op1->AsOp()->gtOp1;
// If tree has a zero field sequence annotation, update the annotation
// on addr node.
FieldSeqNode* zeroFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq))
{
fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq);
}
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
}
else if (op1->OperGet() == GT_OBJ)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(OBJ(...)) == (...).
GenTree* addr = op1->AsObj()->Addr();
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
{
// Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
// (Be sure to mark "z" as an l-value...)
ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack));
for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2())
{
commas.Push(comma);
}
GenTree* commaNode = commas.Top();
// The top-level addr might be annotated with a zeroOffset field.
FieldSeqNode* zeroFieldSeq = nullptr;
bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
tree = op1;
commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE;
// If the node we're about to put under a GT_ADDR is an indirection, it
// doesn't need to be materialized, since we only want the addressing mode. Because
// of this, this GT_IND is not a faulting indirection and we don't have to extract it
// as a side effect.
GenTree* commaOp2 = commaNode->AsOp()->gtOp2;
if (commaOp2->OperIsBlk())
{
commaOp2->SetOper(GT_IND);
}
if (commaOp2->gtOper == GT_IND)
{
commaOp2->gtFlags |= GTF_IND_NONFAULTING;
commaOp2->gtFlags &= ~GTF_EXCEPT;
commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT);
}
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
if (isZeroOffset)
{
// Transfer the annotation to the new GT_ADDR node.
fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq);
}
commaNode->AsOp()->gtOp2 = op1;
// Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
// might give op1 a type different from byref (like, say, native int). So now go back and give
// all the comma nodes the type of op1.
// TODO: the comma flag update below is conservative and can be improved.
// For example, if we made the ADDR(IND(x)) == x transformation, we may be able to
// get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF).
while (!commas.Empty())
{
GenTree* comma = commas.Pop();
comma->gtType = op1->gtType;
comma->gtFlags |= op1->gtFlags;
#ifdef DEBUG
comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
gtUpdateNodeSideEffects(comma);
}
return tree;
}
break;
case GT_COLON:
if (fgGlobalMorph)
{
/* Mark the nodes that are conditionally executed */
fgWalkTreePre(&tree, gtMarkColonCond);
}
/* Since we're doing this postorder we clear this if it got set by a child */
fgRemoveRestOfBlock = false;
break;
case GT_COMMA:
/* Special case: trees that don't produce a value */
if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2))
{
typ = tree->gtType = TYP_VOID;
}
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
//
if (!optValnumCSE_phase)
{
// Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
// is all we need.
GenTree* op1SideEffects = nullptr;
// The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
// hoisted expressions in loops.
gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
if (op1SideEffects)
{
// Replace the left hand side with the side effect list.
op1 = op1SideEffects;
tree->AsOp()->gtOp1 = op1SideEffects;
gtUpdateNodeSideEffects(tree);
}
else
{
op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op2;
}
// If the right operand is just a void nop node, throw it away, unless this is a
// comma throw, in which case we want the top-level morphing loop to recognize it.
if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree))
{
op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
case GT_JTRUE:
/* Special case if fgRemoveRestOfBlock is set to true */
if (fgRemoveRestOfBlock)
{
if (fgIsCommaThrow(op1, true))
{
GenTree* throwNode = op1->AsOp()->gtOp1;
JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n",
dspTreeID(tree));
DEBUG_DESTROY_NODE(tree);
return throwNode;
}
noway_assert(op1->OperIsCompare());
noway_assert(op1->gtFlags & GTF_EXCEPT);
// We need to keep op1 for the side-effects. Hang it off
// a GT_COMMA node
JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree));
tree->ChangeOper(GT_COMMA);
tree->AsOp()->gtOp2 = op2 = gtNewNothingNode();
// Additionally since we're eliminating the JTRUE
// codegen won't like it if op1 is a RELOP of longs, floats or doubles.
// So we change it into a GT_COMMA as well.
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1));
op1->ChangeOper(GT_COMMA);
op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop
op1->gtType = op1->AsOp()->gtOp1->gtType;
return tree;
}
break;
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName ==
NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant)
{
// Should be expanded by the time it reaches CSE phase
assert(!optValnumCSE_phase);
JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to ");
if (op1->OperIsConst())
{
// We are lucky to catch a constant here that the importer was not able to see
JITDUMP("true\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(1);
}
else
{
GenTree* op1SideEffects = nullptr;
gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT);
if (op1SideEffects != nullptr)
{
DEBUG_DESTROY_NODE(tree);
// Keep side-effects of op1
tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0));
JITDUMP("false with side effects:\n")
DISPTREE(tree);
}
else
{
JITDUMP("false\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(0);
}
}
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
break;
default:
break;
}
assert(oper == tree->gtOper);
// Propagate comma throws.
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON))
{
if ((op1 != nullptr) && fgIsCommaThrow(op1, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
if ((op2 != nullptr) && fgIsCommaThrow(op2, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
}
/*-------------------------------------------------------------------------
* Optional morphing is done if tree transformations are permitted
*/
if ((opts.compFlags & CLFLG_TREETRANS) == 0)
{
return tree;
}
tree = fgMorphSmpOpOptional(tree->AsOp());
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeCast: Optimizes the supplied GT_CAST tree.
//
// Tries to get rid of the cast, its operand, or the GTF_OVERFLOW flag; also
// calls "optNarrowTree". Called in post-order by "fgMorphSmpOp".
//
// Arguments:
// cast - the cast tree to optimize
//
// Return Value:
// The optimized tree (that can have any shape).
//
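// Notes:
//    For example, "CAST(ushort <- x)" where "x" is already known to fit in
//    [0..65535] is a no-op and is removed entirely, while for
//    "CAST(ubyte <- IND(byte))" the indirection itself is simply retyped to ubyte.
//    (Sketches only; the code below spells out the exact conditions.)
//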
GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast)
{
GenTree* src = cast->CastOp();
if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src))
{
return cast;
}
// See if we can discard the cast.
if (varTypeIsIntegral(cast) && varTypeIsIntegral(src))
{
IntegralRange srcRange = IntegralRange::ForNode(src, this);
IntegralRange noOvfRange = IntegralRange::ForCastInput(cast);
if (noOvfRange.Contains(srcRange))
{
// Casting between same-sized types is a no-op,
// given we have proven this cast cannot overflow.
if (genActualType(cast) == genActualType(src))
{
return src;
}
cast->ClearOverflow();
cast->SetAllEffectsFlags(src);
// Try and see if we can make this cast into a cheaper zero-extending version.
if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive())
{
cast->SetUnsigned();
}
}
// For checked casts, we're done.
if (cast->gtOverflow())
{
return cast;
}
var_types castToType = cast->CastToType();
// For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast.
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) &&
src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD))
{
// We're changing the type here so we need to update the VN;
// in other cases we discard the cast without modifying src
// so the VN doesn't change.
src->ChangeType(castToType);
src->SetVNsFromNode(cast);
return src;
}
// Try to narrow the operand of the cast and discard the cast.
if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) &&
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false))
{
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true);
// "optNarrowTree" may leave a dead cast behind.
if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp())))
{
src = src->AsCast()->CastOp();
}
return src;
}
// Check for two consecutive casts, we may be able to discard the intermediate one.
if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow())
{
var_types dstCastToType = castToType;
var_types srcCastToType = src->AsCast()->CastToType();
// CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X).
// CAST(ushort <- CAST(short <- X)): CAST(ushort <- X).
if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType)))
{
cast->CastOp() = src->AsCast()->CastOp();
DEBUG_DESTROY_NODE(src);
}
}
}
return cast;
}
//------------------------------------------------------------------------
// fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns.
//
// Arguments:
// cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant
//
// Return Value:
// The optimized tree, "cmp" in case no optimizations were done.
// Currently only returns relop trees.
//
GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_EQ, GT_NE));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
// Check for "(expr +/- icon1) ==/!= (non-zero-icon2)".
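// For example, "(x + 2) == 5" becomes "x == 3"; since the pattern can be nested
// (e.g. "((x + 2) - 1) == 5"), the loop below keeps folding while the left operand
// is still an ADD/SUB of a constant.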
if (op2->IsCnsIntOrI() && (op2->IconValue() != 0))
{
// Since this can occur repeatedly we use a while loop.
while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) &&
!op1->gtOverflow())
{
// Got it; change "x + icon1 == icon2" to "x == icon2 - icon1".
ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue();
ssize_t op2Value = op2->IconValue();
if (op1->OperIs(GT_ADD))
{
op2Value -= op1Value;
}
else
{
op2Value += op1Value;
}
op1 = op1->AsOp()->gtGetOp1();
op2->SetIconValue(static_cast<int32_t>(op2Value));
}
cmp->gtOp1 = op1;
fgUpdateConstTreeValueNumber(op2);
}
// Here we look for the following tree
//
// EQ/NE
// / \.
// op1 CNS 0/1
//
if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1))
{
ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue());
if (op1->OperIsCompare())
{
// Here we look for the following tree
//
// EQ/NE -> RELOP/!RELOP
// / \ / \.
// RELOP CNS 0/1
// / \.
//
// Note that we will remove/destroy the EQ/NE node and move
// the RELOP up into its location.
// Here we reverse the RELOP if necessary.
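// For example, "(x < y) == 0" becomes "x >= y", while "(x < y) != 0" becomes
// just "x < y".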
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ)));
if (reverse)
{
gtReverseCond(op1);
}
noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
op1->SetVNsFromNode(cmp);
DEBUG_DESTROY_NODE(cmp);
return op1;
}
//
// Now we check for a compare with the result of an '&' operator
//
// Here we look for the following transformation:
//
// EQ/NE EQ/NE
// / \ / \.
// AND CNS 0/1 -> AND CNS 0
// / \ / \.
// RSZ/RSH CNS 1 x CNS (1 << y)
// / \.
// x CNS_INT +y
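// For example, "((x >> 3) & 1) == 1" becomes "(x & 8) != 0".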
if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH))
{
GenTreeOp* andOp = op1->AsOp();
GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp();
if (!rshiftOp->gtGetOp2()->IsCnsIntOrI())
{
goto SKIP;
}
ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue();
if (shiftAmount < 0)
{
goto SKIP;
}
if (!andOp->gtGetOp2()->IsIntegralConst(1))
{
goto SKIP;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if (andOp->TypeIs(TYP_INT))
{
if (shiftAmount > 31)
{
goto SKIP;
}
andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount));
// Reverse the condition if necessary.
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetIconValue(0);
}
}
else if (andOp->TypeIs(TYP_LONG))
{
if (shiftAmount > 63)
{
goto SKIP;
}
andMask->SetLngValue(1ll << shiftAmount);
// Reverse the cond if necessary
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetLngValue(0);
}
}
andOp->gtOp1 = rshiftOp->gtGetOp1();
DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2());
DEBUG_DESTROY_NODE(rshiftOp);
}
}
SKIP:
// Now check for compares with small constant longs that can be cast to int.
// Note that we filter out negative values here so that the transformations
// below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were
// we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs.
if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0))
{
return cmp;
}
if (!op1->OperIs(GT_AND))
{
// Another interesting case: cast from int.
if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow())
{
// Simply make this into an integer comparison.
cmp->gtOp1 = op1->AsCast()->CastOp();
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
// Now we perform the following optimization:
// EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) =>
// EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT)
// when the constants are sufficiently small.
// This transform cannot preserve VNs.
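// For example, "EQ(AND(x.long, 0xFF), 0x12)" becomes
// "EQ(AND(CAST(int <- x), 0xFF), 0x12)" (or "x" is narrowed in place when possible),
// with the AND and both constants retyped to int.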
if (fgGlobalMorph)
{
assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND));
// Is the result of the mask effectively an INT?
GenTreeOp* andOp = op1->AsOp();
if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG))
{
return cmp;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if ((andMask->LngValue() >> 32) != 0)
{
return cmp;
}
// Now we narrow the first operand of AND to int.
if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false))
{
optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true);
}
else
{
andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT);
}
assert(andMask == andOp->gtGetOp2());
// Now replace the mask node.
andMask->BashToConst(static_cast<int32_t>(andMask->LngValue()));
// Now change the type of the AND node.
andOp->ChangeType(TYP_INT);
// Finally we replace the comparand.
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation.
//
// Recognizes comparisons against various constant operands and morphs
// them, if possible, into comparisons against zero.
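// For example, a signed "x <= -1" becomes "x < 0", and an unsigned "x < 1"
// becomes "x == 0".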
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// The "cmp" tree, possibly with a modified oper.
// The second operand's constant value may be modified as well.
//
// Assumptions:
// The operands have been swapped so that any constants are on the right.
// The second operand is an integral constant.
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
assert(genActualType(op1) == genActualType(op2));
genTreeOps oper = cmp->OperGet();
int64_t op2Value = op2->IntegralValue();
if (op2Value == 1)
{
// Check for "expr >= 1".
if (oper == GT_GE)
{
// Change to "expr != 0" for unsigned and "expr > 0" for signed.
oper = cmp->IsUnsigned() ? GT_NE : GT_GT;
}
// Check for "expr < 1".
else if (oper == GT_LT)
{
// Change to "expr == 0" for unsigned and "expr <= 0" for signed.
oper = cmp->IsUnsigned() ? GT_EQ : GT_LE;
}
}
// Check for "expr relop -1".
else if (!cmp->IsUnsigned() && (op2Value == -1))
{
// Check for "expr <= -1".
if (oper == GT_LE)
{
// Change to "expr < 0".
oper = GT_LT;
}
// Check for "expr > -1".
else if (oper == GT_GT)
{
// Change to "expr >= 0".
oper = GT_GE;
}
}
else if (cmp->IsUnsigned())
{
if ((oper == GT_LE) || (oper == GT_GT))
{
if (op2Value == 0)
{
// IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
// recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
// if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
// and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare; it sometimes
// occurs as a result of branch inversion.
oper = (oper == GT_LE) ? GT_EQ : GT_NE;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
// LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0).
else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) ||
((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX)))
{
oper = (oper == GT_LE) ? GT_GE : GT_LT;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
}
}
if (!cmp->OperIs(oper))
{
// Keep the old ValueNumber for 'tree' as the new expr
// will still compute the same value as before.
cmp->SetOper(oper, GenTree::PRESERVE_VN);
op2->SetIntegralValue(0);
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// fgOptimizeHWIntrinsic: optimize a HW intrinsic node
//
// Arguments:
// node - HWIntrinsic node to examine
//
// Returns:
// The original node if no optimization happened or if tree bashing occurred.
// An alternative tree if an optimization happened.
//
// Notes:
// Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create,
// and if the node is one of these, attempts to optimize it.
// This is post-order, meaning that it will not morph the children.
//
GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
{
assert(!optValnumCSE_phase);
if (opts.OptimizationDisabled())
{
return node;
}
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
#endif
{
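// For example, "Vector128.Create(0, 0, 0, 0)" (all-constant-zero arguments)
// is retargeted below to the corresponding get_Zero intrinsic.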
bool hwAllArgsAreConstZero = true;
for (GenTree* arg : node->Operands())
{
if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero())
{
hwAllArgsAreConstZero = false;
break;
}
}
if (hwAllArgsAreConstZero)
{
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
{
node->ResetHWIntrinsicId(NI_Vector128_get_Zero);
break;
}
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
{
node->ResetHWIntrinsicId(NI_Vector256_get_Zero);
break;
}
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
{
node->ResetHWIntrinsicId(NI_Vector64_get_Zero);
break;
}
#endif
default:
unreached();
}
}
break;
}
default:
break;
}
return node;
}
#endif
//------------------------------------------------------------------------
// fgOptimizeCommutativeArithmetic: Optimizes commutative operations.
//
// Arguments:
// tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize.
//
// Return Value:
// The optimized tree that can have any shape.
//
GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree)
{
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND));
assert(!tree->gtOverflowEx());
// Commute constants to the right.
if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF))
{
// TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))".
// This may indicate a missed "remorph". Task is to re-enable this assertion and investigate.
std::swap(tree->gtOp1, tree->gtOp2);
}
if (fgOperIsBitwiseRotationRoot(tree->OperGet()))
{
GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree);
if (rotationTree != nullptr)
{
return rotationTree;
}
}
if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR))
{
GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp());
if (castTree != nullptr)
{
return castTree;
}
}
if (varTypeIsIntegralOrI(tree))
{
genTreeOps oldTreeOper = tree->OperGet();
GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp());
if (optimizedTree != nullptr)
{
if (!optimizedTree->OperIs(oldTreeOper))
{
// "optimizedTree" could end up being a COMMA.
return optimizedTree;
}
tree = optimizedTree;
}
}
if (!optValnumCSE_phase)
{
GenTree* optimizedTree = nullptr;
if (tree->OperIs(GT_ADD))
{
optimizedTree = fgOptimizeAddition(tree);
}
else if (tree->OperIs(GT_MUL))
{
optimizedTree = fgOptimizeMultiply(tree);
}
else if (tree->OperIs(GT_AND))
{
optimizedTree = fgOptimizeBitwiseAnd(tree);
}
else if (tree->OperIs(GT_XOR))
{
optimizedTree = fgOptimizeBitwiseXor(tree);
}
if (optimizedTree != nullptr)
{
return optimizedTree;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeAddition: optimizes addition.
//
// Arguments:
// add - the unchecked GT_ADD tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
{
assert(add->OperIs(GT_ADD) && !add->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = add->gtGetOp1();
GenTree* op2 = add->gtGetOp2();
// Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))".
// Be careful not to create a byref pointer that may point outside of the ref object.
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2".
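// For example, "(x + 4) + (y + 8)" becomes "(x + y) + 12".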
if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() &&
op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() &&
!varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph)
{
GenTreeOp* addOne = op1->AsOp();
GenTreeOp* addTwo = op2->AsOp();
GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon();
GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon();
addOne->gtOp2 = addTwo->gtGetOp1();
addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2());
DEBUG_DESTROY_NODE(addTwo);
constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue());
op2 = constOne;
add->gtOp2 = constOne;
DEBUG_DESTROY_NODE(constTwo);
}
// Fold (x + 0) - given it won't change the tree type to TYP_REF.
// TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)".
if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF)))
{
if (op2->IsCnsIntOrI() && varTypeIsI(op1))
{
fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq);
}
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(add);
return op1;
}
// Note that these transformations are legal for floating-point ADDs as well.
if (opts.OptimizationEnabled())
{
// - a + b = > b - a
// ADD((NEG(a), b) => SUB(b, a)
// Do not do this if "op2" is constant for canonicalization purposes.
if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2))
{
add->SetOper(GT_SUB);
add->gtOp1 = op2;
add->gtOp2 = op1->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op1);
return add;
}
// a + -b = > a - b
// ADD(a, (NEG(b)) => SUB(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
add->SetOper(GT_SUB);
add->gtOp2 = op2->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op2);
return add;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeMultiply: optimizes multiplication.
//
// Arguments:
// mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul));
assert(!mul->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
assert(mul->TypeGet() == genActualType(op1));
assert(mul->TypeGet() == genActualType(op2));
if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl())
{
double multiplierValue = op2->AsDblCon()->gtDconVal;
if (multiplierValue == 1.0)
{
// Fold "x * 1.0" to "x".
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Fold "x * 2.0" to "x + x".
// If op1 is not a local we will have to introduce a temporary via GT_COMMA.
// Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do
// this for locals / after hoisting has run (when rationalization remorphs
// math INTRINSICs into calls...).
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear)))
{
op2 = fgMakeMultiUse(&op1);
GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2);
INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return add;
}
}
if (op2->IsIntegralConst())
{
ssize_t mult = op2->AsIntConCommon()->IconValue();
bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq();
assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->GetNext() == nullptr);
if (mult == 0)
{
// We may be able to throw away op1 (unless it has side-effects)
if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
{
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(mul);
return op2; // Just return the "0" node
}
// We need to keep op1 for the side-effects. Hang it off a GT_COMMA node.
mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN);
return mul;
}
#ifdef TARGET_XARCH
// Should we try to replace integer multiplication with lea/add/shift sequences?
bool mulShiftOpt = compCodeOpt() != SMALL_CODE;
#else // !TARGET_XARCH
bool mulShiftOpt = false;
#endif // !TARGET_XARCH
size_t abs_mult = (mult >= 0) ? mult : -mult;
size_t lowestBit = genFindLowestBit(abs_mult);
bool changeToShift = false;
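// For example, "x * 8" becomes "x << 3" and "x * -8" becomes "(-x) << 3"; with
// mulShiftOpt, "x * 24" becomes "(x * 3) << 3" so that the remaining multiply can be
// encoded as a scaled address computation (lea).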
// is it a power of two? (positive or negative)
if (abs_mult == lowestBit)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
// If "op2" is a constant array index, the other multiplicand must be a constant.
// Transfer the annotation to the other one.
if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq->GetNext() == nullptr);
GenTree* otherOp = op1;
if (otherOp->OperGet() == GT_NEG)
{
otherOp = otherOp->AsOp()->gtOp1;
}
assert(otherOp->OperGet() == GT_CNS_INT);
assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq;
}
if (abs_mult == 1)
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Change the multiplication into a shift by log2(val) bits.
op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult));
changeToShift = true;
}
else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit))
{
int shift = genLog2(lowestBit);
ssize_t factor = abs_mult >> shift;
if (factor == 3 || factor == 5 || factor == 9)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet());
if (op2IsConstIndex)
{
factorIcon->AsIntCon()->gtFieldSeq =
GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
// change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
op2->AsIntConCommon()->SetIconValue(shift);
changeToShift = true;
}
}
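        // Illustrative examples of the two shapes above (hypothetical, not from the original comments):
        //   x * 8   ->  x << 3
        //   x * -8  ->  (-x) << 3
        //   x * 10  ->  (x * 5) << 1    (lowestBit == 2, factor == 5, shift == 1)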
if (changeToShift)
{
fgUpdateConstTreeValueNumber(op2);
mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN);
return mul;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseAnd: optimizes the "and" operation.
//
// Arguments:
// andOp - the GT_AND tree to optimize.
//
// Return Value:
// The optimized tree, currently always a relop, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp)
{
assert(andOp->OperIs(GT_AND));
assert(!optValnumCSE_phase);
GenTree* op1 = andOp->gtGetOp1();
GenTree* op2 = andOp->gtGetOp2();
// Fold "cmp & 1" to just "cmp".
if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(andOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against
// various cast operands and tries to remove them. E.g.:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CNS_INT long
//
// to:
//
// * GE_un int
// +--* X int
// \--* CNS_INT int
//
// same for:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CAST long <- [u]long <- int
// \--* ARR_LEN int
//
// These patterns quite often show up along with index checks
//
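// A source-level shape that can produce such trees (an illustrative, hypothetical sketch):
//
//    if ((ulong)(uint)i < (ulong)arr.Length) { ... }
//
// where a 32-bit index and a 32-bit length are both widened to 64 bits for the compare.
//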
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// Returns the same tree where operands might have narrower types
//
// Notes:
// TODO-Casts: consider unifying this function with "optNarrowTree"
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTree* op2 = cmp->gtGetOp2();
// Caller is expected to call this function only if we have CAST nodes
assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST));
if (!op1->TypeIs(TYP_LONG))
{
// We can extend this logic to handle small types as well, but currently it's done mostly to
// assist range check elimination
return cmp;
}
GenTree* castOp;
GenTree* knownPositiveOp;
bool knownPositiveIsOp2;
if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))))
{
// op2 is either a LONG constant or (T)ARR_LENGTH
knownPositiveIsOp2 = true;
castOp = cmp->gtGetOp1();
knownPositiveOp = cmp->gtGetOp2();
}
else
{
// op1 is either a LONG constant (yes, it's pretty normal for relops)
// or (T)ARR_LENGTH
castOp = cmp->gtGetOp2();
knownPositiveOp = cmp->gtGetOp1();
knownPositiveIsOp2 = false;
}
if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) &&
castOp->IsUnsigned() && !castOp->gtOverflow())
{
bool knownPositiveFitsIntoU32 = false;
if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue()))
{
// BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX.
knownPositiveFitsIntoU32 = true;
}
else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) &&
knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))
{
knownPositiveFitsIntoU32 = true;
// TODO-Casts: recognize Span.Length here as well.
}
if (!knownPositiveFitsIntoU32)
{
return cmp;
}
JITDUMP("Removing redundant cast(s) for:\n")
DISPTREE(cmp)
JITDUMP("\n\nto:\n\n")
cmp->SetUnsigned();
// Drop cast from castOp
if (knownPositiveIsOp2)
{
cmp->gtOp1 = castOp->AsCast()->CastOp();
}
else
{
cmp->gtOp2 = castOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(castOp);
if (knownPositiveOp->OperIs(GT_CAST))
{
// Drop cast from knownPositiveOp too
if (knownPositiveIsOp2)
{
cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp();
}
else
{
cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(knownPositiveOp);
}
else
{
// Change type for constant from LONG to INT
knownPositiveOp->ChangeType(TYP_INT);
#ifndef TARGET_64BIT
assert(knownPositiveOp->OperIs(GT_CNS_LNG));
knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue()));
#endif
fgUpdateConstTreeValueNumber(knownPositiveOp);
}
DISPTREE(cmp)
JITDUMP("\n")
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseXor: optimizes the "xor" operation.
//
// Arguments:
// xorOp - the GT_XOR tree to optimize.
//
// Return Value:
//    The optimized tree, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseXor(GenTreeOp* xorOp)
{
assert(xorOp->OperIs(GT_XOR));
assert(!optValnumCSE_phase);
GenTree* op1 = xorOp->gtGetOp1();
GenTree* op2 = xorOp->gtGetOp2();
if (op2->IsIntegralConst(0))
{
/* "x ^ 0" is "x" */
DEBUG_DESTROY_NODE(xorOp, op2);
return op1;
}
else if (op2->IsIntegralConst(-1))
{
/* "x ^ -1" is "~x" */
xorOp->ChangeOper(GT_NOT);
xorOp->gtOp2 = nullptr;
DEBUG_DESTROY_NODE(op2);
return xorOp;
}
else if (op2->IsIntegralConst(1) && op1->OperIsCompare())
{
/* "binaryVal ^ 1" is "!binaryVal" */
gtReverseCond(op1);
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(xorOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgPropagateCommaThrow: propagate a "comma throw" up the tree.
//
// "Comma throws" in the compiler represent the canonical form of an always
// throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy
// the semantic that the original expression produced some value and are
// generated by "gtFoldExprConst" when it encounters checked arithmetic that
// will determinably overflow.
//
// In the global morphing phase, "comma throws" are "propagated" up the tree,
// in post-order, to eliminate nodes that will never execute. This method,
// called by "fgMorphSmpOp", encapsulates this optimization.
//
// Arguments:
// parent - the node currently being processed.
// commaThrow - the comma throw in question, "parent"'s operand.
// precedingSideEffects - side effects of nodes preceding "comma" in execution order.
//
// Return Value:
// If "parent" is to be replaced with a comma throw, i. e. the propagation was successful,
// the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception:
// the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not
// have to be a "comma throw", it can be "bare" throw call if the "parent" node did not
// produce any value.
//
// Notes:
// "Comma throws" are very rare.
//
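// Illustration (a hypothetical sketch, not from the original comments): for a parent such as
// "ADD(COMMA(THROW, 0), y)", where nothing with side effects executes before the comma, the
// whole ADD is replaced by "COMMA(THROW, 0)" retyped to the ADD's type; if the parent is
// TYP_VOID, just the bare THROW call is returned instead.
//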
GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects)
{
// Comma throw propagation does not preserve VNs, and deletes nodes.
assert(fgGlobalMorph);
assert(fgIsCommaThrow(commaThrow));
if ((commaThrow->gtFlags & GTF_COLON_COND) == 0)
{
fgRemoveRestOfBlock = true;
}
if ((precedingSideEffects & GTF_ALL_EFFECT) == 0)
{
if (parent->TypeIs(TYP_VOID))
{
// Return the throw node as the new tree.
return commaThrow->gtGetOp1();
}
// Fix up the COMMA's type if needed.
if (genActualType(parent) != genActualType(commaThrow))
{
commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent));
commaThrow->ChangeType(genActualType(parent));
}
return commaThrow;
}
return nullptr;
}
//----------------------------------------------------------------------------------------------
// fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree.
//
// Arguments:
// node - The return node that uses an indirection.
//
// Return Value:
// the original op1 of the ret if there was no optimization or an optimized new op1.
//
GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret)
{
assert(ret->OperIs(GT_RETURN));
assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ));
GenTreeIndir* ind = ret->gtGetOp1()->AsIndir();
GenTree* addr = ind->Addr();
if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR))
{
// If struct promotion was undone, adjust the annotations
if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr))
{
return ind;
}
// If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that
// LclVar.
// Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))).
GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar();
if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum()))
{
assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind));
unsigned indSize;
if (ind->OperIs(GT_IND))
{
indSize = genTypeSize(ind);
}
else
{
indSize = ind->AsBlk()->GetLayout()->GetSize();
}
LclVarDsc* varDsc = lvaGetDesc(lclVar);
unsigned lclVarSize;
if (!lclVar->TypeIs(TYP_STRUCT))
{
lclVarSize = genTypeSize(varDsc->TypeGet());
}
else
{
lclVarSize = varDsc->lvExactSize;
}
// TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST
// int<-SIMD16` etc.
assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister);
#if defined(TARGET_64BIT)
bool canFold = (indSize == lclVarSize);
#else // !TARGET_64BIT
// TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST
// long<->double` there.
bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES);
#endif
// TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for
// gtNewTempAssign`.
if (canFold && (genReturnBB == nullptr))
{
// Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it.
// Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken
// and enregister it.
DEBUG_DESTROY_NODE(ind);
DEBUG_DESTROY_NODE(addr);
ret->gtOp1 = lclVar;
// We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can
// get rid of it now since the GT_RETURN node should never have
// its address taken.
assert((ret->gtFlags & GTF_DONT_CSE) == 0);
lclVar->gtFlags &= ~GTF_DONT_CSE;
return lclVar;
}
else if (!varDsc->lvDoNotEnregister)
{
lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet));
}
}
}
return ind;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
{
genTreeOps oper = tree->gtOper;
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types typ = tree->TypeGet();
if (fgGlobalMorph && GenTree::OperIsCommutative(oper))
{
/* Swap the operands so that the more expensive one is 'op1' */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtOp1 = op2;
tree->gtOp2 = op1;
op2 = op1;
op1 = tree->gtOp1;
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
if (oper == op2->gtOper)
{
/* Reorder nested operators at the same precedence level to be
left-recursive. For example, change "(a+(b+c))" to the
equivalent expression "((a+b)+c)".
*/
/* Things are handled differently for floating-point operators */
if (!varTypeIsFloating(tree->TypeGet()))
{
fgMoveOpsLeft(tree);
op1 = tree->gtOp1;
op2 = tree->gtOp2;
}
}
}
#if REARRANGE_ADDS
/* Change "((x+icon)+y)" to "((x+y)+icon)"
Don't reorder floating-point operations */
if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() &&
varTypeIsIntegralOrI(typ))
{
GenTree* ad1 = op1->AsOp()->gtOp1;
GenTree* ad2 = op1->AsOp()->gtOp2;
if (!op2->OperIsConst() && ad2->OperIsConst())
{
// This takes
// + (tree)
// / \.
// / \.
// / \.
// + (op1) op2
// / \.
// / \.
// ad1 ad2
//
// and it swaps ad2 and op2.
// Don't create a byref pointer that may point outside of the ref object.
// If a GC happens, the byref won't get updated. This can happen if one
// of the int components is negative. It also requires the address generation
// be in a fully-interruptible code region.
if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet()))
{
tree->gtOp2 = ad2;
op1->AsOp()->gtOp2 = op2;
op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
op2 = tree->gtOp2;
}
}
}
#endif
/*-------------------------------------------------------------------------
* Perform optional oper-specific postorder morphing
*/
switch (oper)
{
case GT_ASG:
// Make sure we're allowed to do this.
if (optValnumCSE_phase)
{
// It is not safe to reorder/delete CSE's
break;
}
if (varTypeIsStruct(typ) && !tree->IsPhiDefn())
{
if (tree->OperIsCopyBlkOp())
{
return fgMorphCopyBlock(tree);
}
else
{
return fgMorphInitBlock(tree);
}
}
if (typ == TYP_LONG)
{
break;
}
if (op2->gtFlags & GTF_ASG)
{
break;
}
if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT))
{
break;
}
/* Special case: a cast that can be thrown away */
// TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only
// one cast and sometimes there is another one after it that gets removed by this
// code. fgMorphSmp should be improved to remove all redundant casts so this code
// can be removed.
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow())
{
var_types srct;
var_types cast;
var_types dstt;
srct = op2->AsCast()->CastOp()->TypeGet();
cast = (var_types)op2->CastToType();
dstt = op1->TypeGet();
/* Make sure these are all ints and precision is not lost */
if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT)
{
op2 = tree->gtOp2 = op2->AsCast()->CastOp();
}
}
break;
case GT_MUL:
/* Check for the case "(val + icon) * icon" */
if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD)
{
GenTree* add = op1->AsOp()->gtOp2;
if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0))
{
if (tree->gtOverflow() || op1->gtOverflow())
{
break;
}
ssize_t imul = op2->AsIntCon()->gtIconVal;
ssize_t iadd = add->AsIntCon()->gtIconVal;
/* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
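                    // (Illustrative: "(x + 3) * 5" becomes "(x * 5) + 15".)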
oper = GT_ADD;
tree->ChangeOper(oper);
op2->AsIntCon()->SetValueTruncating(iadd * imul);
op1->ChangeOper(GT_MUL);
add->AsIntCon()->SetIconValue(imul);
}
}
break;
case GT_DIV:
/* For "val / 1", just return "val" */
if (op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(tree);
return op1;
}
break;
case GT_UDIV:
case GT_UMOD:
tree->CheckDivideByConstOptimized(this);
break;
case GT_LSH:
/* Check for the case "(val + icon) << icon" */
if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow())
{
GenTree* cns = op1->AsOp()->gtOp2;
if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
{
ssize_t ishf = op2->AsIntConCommon()->IconValue();
ssize_t iadd = cns->AsIntConCommon()->IconValue();
// printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n");
/* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */
tree->ChangeOper(GT_ADD);
// we are reusing the shift amount node here, but the type we want is that of the shift result
op2->gtType = op1->gtType;
op2->AsIntConCommon()->SetValueTruncating(iadd << ishf);
if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr &&
cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(cns->AsIntCon()->gtFieldSeq->GetNext() == nullptr);
op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq;
}
op1->ChangeOper(GT_LSH);
cns->AsIntConCommon()->SetIconValue(ishf);
}
}
break;
case GT_INIT_VAL:
// Initialization values for initBlk have special semantics - their lower
// byte is used to fill the struct. However, we allow 0 as a "bare" value,
// which enables them to get a VNForZero, and be propagated.
if (op1->IsIntegralConst(0))
{
return op1;
}
break;
default:
break;
}
return tree;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree.
//
// Arguments:
// multiOp - The tree to morph
//
// Return Value:
// The fully morphed tree.
//
GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp)
{
gtUpdateNodeOperSideEffects(multiOp);
bool dontCseConstArguments = false;
#if defined(FEATURE_HW_INTRINSICS)
// Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments
if (multiOp->OperIs(GT_HWINTRINSIC))
{
NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM)
{
dontCseConstArguments = true;
}
#elif defined(TARGET_ARMARCH)
if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic))
{
dontCseConstArguments = true;
}
#endif
}
#endif
for (GenTree** use : multiOp->UseEdges())
{
*use = fgMorphTree(*use);
GenTree* operand = *use;
multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
if (dontCseConstArguments && operand->OperIsConst())
{
operand->SetDoNotCSE();
}
// Promoted structs after morph must be in one of two states:
// a) Fully eliminated from the IR (independent promotion) OR only be
// used by "special" nodes (e. g. LHS of ASGs for multi-reg structs).
// b) Marked as do-not-enregister (dependent promotion).
//
// So here we preserve this invariant and mark any promoted structs as do-not-enreg.
//
if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted)
{
lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum()
DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep));
}
}
#if defined(FEATURE_HW_INTRINSICS)
if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic();
switch (hw->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_SSE_Xor:
case NI_SSE2_Xor:
case NI_AVX_Xor:
case NI_AVX2_Xor:
{
// Transform XOR(X, 0) to X for vectors
GenTree* op1 = hw->Op(1);
GenTree* op2 = hw->Op(2);
if (!gtIsActiveCSE_Candidate(hw))
{
if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op1);
return op2;
}
if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
}
#endif
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARMARCH)
case NI_Vector64_Create:
#endif
{
bool hwAllArgsAreConst = true;
for (GenTree** use : multiOp->UseEdges())
{
if (!(*use)->OperIsConst())
{
hwAllArgsAreConst = false;
break;
}
}
// Avoid unexpected CSE for constant arguments for Vector_.Create
// but only if all arguments are constants.
if (hwAllArgsAreConst)
{
for (GenTree** use : multiOp->UseEdges())
{
(*use)->SetDoNotCSE();
}
}
}
break;
default:
break;
}
}
#endif // defined(FEATURE_HW_INTRINSICS)
#ifdef FEATURE_HW_INTRINSICS
if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase)
{
return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic());
}
#endif
return multiOp;
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b
// (see ECMA III 3.55 and III.3.56).
//
// Arguments:
// tree - The GT_MOD/GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// For ARM64 we don't have a remainder instruction so this transform is
// always done. For XARCH this transform is done if we know that magic
// division will be used, in that case this transform allows CSE to
// eliminate the redundant div from code like "x = a / 3; y = a % 3;".
//
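// Illustration (a hypothetical sketch, not from the original comments):
//
//    GT_MOD(a, b)  =>  SUB(a', MUL(DIV(a, b), b'))
//
// where a' and b' are multi-use copies made via fgMakeMultiUse, i.e. the source-level
// effect of "a % b" becoming "a - (a / b) * b".
//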
GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree));
if (tree->OperGet() == GT_MOD)
{
tree->SetOper(GT_DIV);
}
else if (tree->OperGet() == GT_UMOD)
{
tree->SetOper(GT_UDIV);
}
else
{
noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
}
var_types type = tree->gtType;
GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1);
GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2);
GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue);
GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul);
// Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul".
//
sub->gtFlags |= GTF_REVERSE_OPS;
#ifdef DEBUG
sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
tree->CheckDivideByConstOptimized(this);
return sub;
}
//------------------------------------------------------------------------
// fgMorphUModToAndSub: Transform a % b into the equivalent a & (b - 1).
// '%' must be unsigned (GT_UMOD).
// 'a' and 'b' must be integers.
// 'b' must be a constant and a power of two.
//
// Arguments:
// tree - The GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// This is more optimized than calling fgMorphModToSubMulDiv.
//
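// Illustration (hypothetical): an unsigned "x % 8" becomes "x & 7".
//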
GenTree* Compiler::fgMorphUModToAndSub(GenTreeOp* tree)
{
JITDUMP("\nMorphing UMOD [%06u] to And/Sub\n", dspTreeID(tree));
assert(tree->OperIs(GT_UMOD));
assert(tree->gtOp2->IsIntegralConstUnsignedPow2());
const var_types type = tree->TypeGet();
const size_t cnsValue = (static_cast<size_t>(tree->gtOp2->AsIntConCommon()->IntegralValue())) - 1;
GenTree* const newTree = gtNewOperNode(GT_AND, type, tree->gtOp1, gtNewIconNode(cnsValue, type));
INDEBUG(newTree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
DEBUG_DESTROY_NODE(tree->gtOp2);
DEBUG_DESTROY_NODE(tree);
return newTree;
}
//------------------------------------------------------------------------------
// fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree.
//
// Arguments:
// oper - Operation to check
//
// Return Value:
// True if the operation can be a root of a bitwise rotation tree; false otherwise.
bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper)
{
return (oper == GT_OR) || (oper == GT_XOR);
}
//------------------------------------------------------------------------------
// fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return
//                                      an equivalent GT_ROL or GT_ROR tree; otherwise, return "nullptr".
//
// Arguments:
// tree - tree to check for a rotation pattern
//
// Return Value:
// An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise.
//
// Assumption:
// The input is a GT_OR or a GT_XOR tree.
GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
//
// Check for a rotation pattern, e.g.,
//
// OR ROL
// / \ / \.
// LSH RSZ -> x y
// / \ / \.
// x AND x AND
// / \ / \.
// y 31 ADD 31
// / \.
// NEG 32
// |
// y
// The patterns recognized:
// (x << (y & M)) op (x >>> ((-y + N) & M))
// (x >>> ((-y + N) & M)) op (x << (y & M))
//
// (x << y) op (x >>> (-y + N))
    // (x >>> (-y + N)) op (x << y)
//
// (x >>> (y & M)) op (x << ((-y + N) & M))
// (x << ((-y + N) & M)) op (x >>> (y & M))
//
// (x >>> y) op (x << (-y + N))
// (x << (-y + N)) op (x >>> y)
//
// (x << c1) op (x >>> c2)
// (x >>> c1) op (x << c2)
//
// where
// c1 and c2 are const
// c1 + c2 == bitsize(x)
// N == bitsize(x)
// M is const
// M & (N - 1) == N - 1
// op is either | or ^
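    // A source-level shape this aims to recognize (illustrative, hypothetical):
    //
    //    (x << n) | (x >> (32 - n))    // with an unsigned 32-bit x
    //
    // which, when the shift operands and masks line up as described above, is rewritten
    // into a single GT_ROL(x, n).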
if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
{
// We can't do anything if the tree has assignments, calls, or volatile
// reads. Note that we allow GTF_EXCEPT side effect since any exceptions
// thrown by the original tree will be thrown by the transformed tree as well.
return nullptr;
}
genTreeOps oper = tree->OperGet();
assert(fgOperIsBitwiseRotationRoot(oper));
// Check if we have an LSH on one side of the OR and an RSZ on the other side.
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
GenTree* leftShiftTree = nullptr;
GenTree* rightShiftTree = nullptr;
if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ))
{
leftShiftTree = op1;
rightShiftTree = op2;
}
else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH))
{
leftShiftTree = op2;
rightShiftTree = op1;
}
else
{
return nullptr;
}
// Check if the trees representing the value to shift are identical.
// We already checked that there are no side effects above.
if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1()))
{
GenTree* rotatedValue = leftShiftTree->gtGetOp1();
var_types rotatedValueActualType = genActualType(rotatedValue->gtType);
ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8;
noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64));
GenTree* leftShiftIndex = leftShiftTree->gtGetOp2();
GenTree* rightShiftIndex = rightShiftTree->gtGetOp2();
// The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits
// shouldn't be masked for the transformation to be valid. If additional
// higher bits are not masked, the transformation is still valid since the result
// of MSIL shift instructions is unspecified if the shift amount is greater or equal
// than the width of the value being shifted.
ssize_t minimalMask = rotatedValueBitSize - 1;
ssize_t leftShiftMask = -1;
ssize_t rightShiftMask = -1;
if ((leftShiftIndex->OperGet() == GT_AND))
{
if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
leftShiftIndex = leftShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if ((rightShiftIndex->OperGet() == GT_AND))
{
if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
rightShiftIndex = rightShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask))
{
// The shift index is overmasked, e.g., we have
            // something like (x << (y & 15)) or
            // (x >> ((32 - y) & 15)) with 32 bit x.
// The transformation is not valid.
return nullptr;
}
GenTree* shiftIndexWithAdd = nullptr;
GenTree* shiftIndexWithoutAdd = nullptr;
genTreeOps rotateOp = GT_NONE;
GenTree* rotateIndex = nullptr;
if (leftShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = leftShiftIndex;
shiftIndexWithoutAdd = rightShiftIndex;
rotateOp = GT_ROR;
}
else if (rightShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = rightShiftIndex;
shiftIndexWithoutAdd = leftShiftIndex;
rotateOp = GT_ROL;
}
if (shiftIndexWithAdd != nullptr)
{
if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI())
{
if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG)
{
if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd))
{
// We found one of these patterns:
// (x << (y & M)) | (x >>> ((-y + N) & M))
// (x << y) | (x >>> (-y + N))
// (x >>> (y & M)) | (x << ((-y + N) & M))
// (x >>> y) | (x << (-y + N))
// where N == bitsize(x), M is const, and
// M & (N - 1) == N - 1
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64))
{
// TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86.
// GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need
// to add helpers for GT_ROL and GT_ROR.
return nullptr;
}
#endif
rotateIndex = shiftIndexWithoutAdd;
}
}
}
}
}
else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
{
if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
// We found this pattern:
// (x << c1) | (x >>> c2)
// where c1 and c2 are const and c1 + c2 == bitsize(x)
rotateOp = GT_ROL;
rotateIndex = leftShiftIndex;
}
}
if (rotateIndex != nullptr)
{
noway_assert(GenTree::OperIsRotate(rotateOp));
GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT;
// We can use the same tree only during global morph; reusing the tree in a later morph
// may invalidate value numbers.
if (fgGlobalMorph)
{
tree->AsOp()->gtOp1 = rotatedValue;
tree->AsOp()->gtOp2 = rotateIndex;
tree->ChangeOper(rotateOp);
unsigned childFlags = 0;
for (GenTree* op : tree->Operands())
{
childFlags |= (op->gtFlags & GTF_ALL_EFFECT);
}
// The parent's flags should be a superset of its operands' flags
noway_assert((inputTreeEffects & childFlags) == childFlags);
}
else
{
tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex);
noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT));
}
return tree;
}
}
return nullptr;
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------------
// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
//
// Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap
// operands if the first one is a constant and the second one is not, even for trees which
// end up not being eligibile for long multiplication.
//
// Arguments:
// mul - GT_MUL tree to check for a long multiplication opportunity
//
// Return Value:
// The original tree, with operands possibly swapped, if it is not eligible for long multiplication.
// Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
//
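// Illustration (hypothetical): on a 32-bit target, "(long)i * (long)j" with "int i, j" is
// MUL(CAST(long <- int, i), CAST(long <- int, j)); marking it GTF_MUL_64RSLT lets decomposition
// emit a single widening multiply instead of a long-multiply helper call.
//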
GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(mul->TypeIs(TYP_LONG));
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// "IsValidLongMul" and decomposition do not handle constant op1.
if (op1->IsIntegralConst())
{
std::swap(op1, op2);
mul->gtOp1 = op1;
mul->gtOp2 = op2;
}
if (!mul->IsValidLongMul())
{
return mul;
}
// MUL_LONG needs to do the work the casts would have done.
mul->ClearUnsigned();
if (op1->IsUnsigned())
{
mul->SetUnsigned();
}
// "IsValidLongMul" returned "true", so this GT_MUL cannot overflow.
mul->ClearOverflow();
mul->Set64RsltMul();
return fgMorphLongMul(mul);
}
//------------------------------------------------------------------------------
// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
//
// Morphs *only* the operands of casts that compose the long mul to
// avoid them being folded away.
//
// Arguments:
// mul - GT_MUL tree to morph operands of
//
// Return Value:
// The original tree, with operands morphed and flags propagated.
//
GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
{
INDEBUG(mul->DebugCheckLongMul());
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp());
op1->SetAllEffectsFlags(op1->AsCast()->CastOp());
if (op2->OperIs(GT_CAST))
{
op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp());
op2->SetAllEffectsFlags(op2->AsCast()->CastOp());
}
mul->SetAllEffectsFlags(op1, op2);
op1->SetDoNotCSE();
op2->SetDoNotCSE();
return mul;
}
#endif // !defined(TARGET_64BIT)
/*****************************************************************************
*
* Transform the given tree for code generation and return an equivalent tree.
*/
GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac)
{
assert(tree);
#ifdef DEBUG
if (verbose)
{
if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID)
{
noway_assert(!"JitBreakMorphTree hit");
}
}
#endif
#ifdef DEBUG
int thisMorphNum = 0;
if (verbose && treesBeforeAfterMorph)
{
thisMorphNum = morphNum++;
printf("\nfgMorphTree (before %d):\n", thisMorphNum);
gtDispTree(tree);
}
#endif
if (fgGlobalMorph)
{
// Apply any rewrites for implicit byref arguments before morphing the
// tree.
if (fgMorphImplicitByRefArgs(tree))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum);
gtDispTree(tree);
}
#endif
}
}
/*-------------------------------------------------------------------------
* fgMorphTree() can potentially replace a tree with another, and the
* caller has to store the return value correctly.
* Turn this on to always make copy of "tree" here to shake out
* hidden/unupdated references.
*/
#ifdef DEBUG
if (compStressCompile(STRESS_GENERIC_CHECK, 0))
{
GenTree* copy;
if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(GT_ADD, TYP_INT);
}
else
{
copy = new (this, GT_CALL) GenTreeCall(TYP_INT);
}
copy->ReplaceWith(tree, this);
#if defined(LATE_DISASM)
// GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
#endif
DEBUG_DESTROY_NODE(tree);
tree = copy;
}
#endif // DEBUG
if (fgGlobalMorph)
{
/* Ensure that we haven't morphed this node already */
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
/* Before morphing the tree, we try to propagate any active assertions */
if (optLocalAssertionProp)
{
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
GenTree* newTree = tree;
while (newTree != nullptr)
{
tree = newTree;
/* newTree is non-Null if we propagated an assertion */
newTree = optAssertionProp(apFull, tree, nullptr, nullptr);
}
assert(tree != nullptr);
}
}
PREFAST_ASSUME(tree != nullptr);
}
/* Save the original un-morphed tree for fgMorphTreeDone */
GenTree* oldTree = tree;
/* Figure out what kind of a node we have */
unsigned kind = tree->OperKind();
/* Is this a constant node? */
if (tree->OperIsConst())
{
tree = fgMorphConst(tree);
goto DONE;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
tree = fgMorphLeaf(tree);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
tree = fgMorphSmpOp(tree, mac);
goto DONE;
}
/* See what kind of a special operator we have here */
switch (tree->OperGet())
{
case GT_CALL:
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree = fgMorphCall(tree->AsCall());
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
tree = fgMorphMultiOp(tree->AsMultiOp());
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]);
}
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT;
}
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_ARR_OFFSET:
tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset);
tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex);
tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj);
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_PHI:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT;
}
break;
case GT_FIELD_LIST:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
break;
case GT_CMPXCHG:
tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation);
tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue);
tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand);
tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL);
tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT;
break;
case GT_STORE_DYN_BLK:
tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk());
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
noway_assert(!"unexpected operator");
}
DONE:
fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum));
return tree;
}
//------------------------------------------------------------------------
// fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree))
{
/* All dependent assertions are killed here */
ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum));
if (killed)
{
AssertionIndex index = optAssertionCount;
while (killed && (index > 0))
{
if (BitVecOps::IsMember(apTraits, killed, index - 1))
{
#ifdef DEBUG
AssertionDsc* curAssertion = optGetAssertion(index);
noway_assert((curAssertion->op1.lcl.lclNum == lclNum) ||
((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum)));
if (verbose)
{
printf("\nThe assignment ");
printTreeID(tree);
printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum);
optPrintAssertion(curAssertion);
}
#endif
// Remove this bit from the killed mask
BitVecOps::RemoveElemD(apTraits, killed, index - 1);
optAssertionRemove(index);
}
index--;
}
// killed mask should now be zero
noway_assert(BitVecOps::IsEmpty(apTraits, killed));
}
}
//------------------------------------------------------------------------
// fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum.
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
// Notes:
// For structs and struct fields, it will invalidate the children and parent
// respectively.
// Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar.
//
void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree))
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
noway_assert(varTypeIsStruct(varDsc));
// Kill the field locals.
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
fgKillDependentAssertionsSingle(i DEBUGARG(tree));
}
// Kill the struct local itself.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
else if (varDsc->lvIsStructField)
{
// Kill the field local.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
// Kill the parent struct.
fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree));
}
else
{
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
}
/*****************************************************************************
*
* This function is called to complete the morphing of a tree node
* It should only be called once for each node.
* If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated,
* to enforce the invariant that each node is only morphed once.
* If local assertion prop is enabled the result tree may be replaced
* by an equivalent tree.
*
*/
void Compiler::fgMorphTreeDone(GenTree* tree,
GenTree* oldTree /* == NULL */
DEBUGARG(int morphNum))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (after %d):\n", morphNum);
gtDispTree(tree);
printf(""); // in our logic this causes a flush
}
#endif
if (!fgGlobalMorph)
{
return;
}
if ((oldTree != nullptr) && (oldTree != tree))
{
/* Ensure that we have morphed this node */
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!");
#ifdef DEBUG
TransferTestDataToNode(oldTree, tree);
#endif
}
else
{
// Ensure that we haven't morphed this node already
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
}
if (tree->OperIsConst())
{
goto DONE;
}
if (!optLocalAssertionProp)
{
goto DONE;
}
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
/* Is this an assignment to a local variable */
GenTreeLclVarCommon* lclVarTree = nullptr;
// The check below will miss LIR-style assignments.
//
// But we shouldn't be running local assertion prop on these,
// as local prop gets disabled when we run global prop.
assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
// DefinesLocal can return true for some BLK op uses, so
// check what gets assigned only when we're at an assignment.
if (tree->OperIsSsaDef() && tree->DefinesLocal(this, &lclVarTree))
{
unsigned lclNum = lclVarTree->GetLclNum();
noway_assert(lclNum < lvaCount);
fgKillDependentAssertions(lclNum DEBUGARG(tree));
}
}
/* If this tree makes a new assertion - make it available */
optAssertionGen(tree);
DONE:;
#ifdef DEBUG
/* Mark this node as being morphed */
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
//------------------------------------------------------------------------
// fgFoldConditional: try and fold conditionals and optimize BBJ_COND or
// BBJ_SWITCH blocks.
//
// Argumetns:
// block - block to examine
//
// Returns:
// FoldResult indicating what changes were made, if any
//
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
FoldResult result = FoldResult::FOLD_DID_NOTHING;
// We don't want to make any code unreachable
//
if (opts.OptimizationDisabled())
{
return result;
}
if (block->bbJumpKind == BBJ_COND)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the jump entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
// block is a BBJ_COND that we are folding the conditional for.
// bTaken is the path that will always be taken from block.
// bNotTaken is the path that will never be taken from block.
//
BasicBlock* bTaken;
BasicBlock* bNotTaken;
if (cond->AsIntCon()->gtIconVal != 0)
{
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
bTaken = block->bbJumpDest;
bNotTaken = block->bbNext;
}
else
{
/* Unmark the loop if we are removing a backwards branch */
/* dest block must also be marked as a loop head and */
/* We must be able to reach the backedge block */
if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
}
/* JTRUE 0 - transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
bTaken = block->bbNext;
bNotTaken = block->bbJumpDest;
}
if (fgHaveValidEdgeWeights)
{
// We are removing an edge from block to bNotTaken
// and we have already computed the edge weights, so
// we will try to adjust some of the weights
//
flowList* edgeTaken = fgGetPredForBlock(bTaken, block);
BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
// We examine the taken edge (block -> bTaken)
// if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
// else if bTaken has valid profile weight and block does not we try to adjust block's weight
// We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken
//
if (block->hasProfileWeight())
{
// The edge weights for (block -> bTaken) are 100% of block's weight
edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken);
if (!bTaken->hasProfileWeight())
{
if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight))
{
// Update the weight of bTaken
bTaken->inheritWeight(block);
bUpdated = bTaken;
}
}
}
else if (bTaken->hasProfileWeight())
{
if (bTaken->countOfInEdges() == 1)
{
// There is only one in edge to bTaken
edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken);
// Update the weight of block
block->inheritWeight(bTaken);
bUpdated = block;
}
}
if (bUpdated != nullptr)
{
weight_t newMinWeight;
weight_t newMaxWeight;
flowList* edge;
// Now fix the weights of the edges out of 'bUpdated'
switch (bUpdated->bbJumpKind)
{
case BBJ_NONE:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
break;
case BBJ_COND:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
FALLTHROUGH;
case BBJ_ALWAYS:
edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
                            edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbJumpDest);
break;
default:
// We don't handle BBJ_SWITCH
break;
}
}
}
/* modify the flow graph */
/* Remove 'block' from the predecessor list of 'bNotTaken' */
fgRemoveRefPred(bNotTaken, block);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
/* if the block was a loop condition we may have to modify
* the loop table */
for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++)
{
/* Some loops may have been already removed by
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
{
continue;
}
/* We are only interested in the loop bottom */
if (optLoopTable[loopNum].lpBottom == block)
{
if (cond->AsIntCon()->gtIconVal == 0)
{
/* This was a bogus loop (condition always false)
* Remove the loop from the table */
optMarkLoopRemoved(loopNum);
optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop"));
#ifdef DEBUG
if (verbose)
{
printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum,
optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum);
}
#endif
}
}
}
}
}
else if (block->bbJumpKind == BBJ_SWITCH)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the switch entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
/* modify the flow graph */
/* Find the actual jump target */
unsigned switchVal;
switchVal = (unsigned)cond->AsIntCon()->gtIconVal;
unsigned jumpCnt;
jumpCnt = block->bbJumpSwt->bbsCount;
BasicBlock** jumpTab;
jumpTab = block->bbJumpSwt->bbsDstTab;
bool foundVal;
foundVal = false;
for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
{
BasicBlock* curJump = *jumpTab;
assert(curJump->countOfInEdges() > 0);
// If val matches switchVal or we are at the last entry and
// we never found the switch value then set the new jump dest
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
if (curJump != block->bbNext)
{
/* transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = curJump;
}
else
{
/* transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
}
foundVal = true;
}
else
{
/* Remove 'block' from the predecessor list of 'curJump' */
fgRemoveRefPred(curJump, block);
}
}
assert(foundVal);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphBlockStmt: morph a single statement in a block.
//
// Arguments:
// block - block containing the statement
// stmt - statement to morph
// msg - string to identify caller in a dump
//
// Returns:
// true if 'stmt' was removed from the block.
//    false if 'stmt' is still in the block (even if other statements were removed).
//
// Notes:
// Can be called anytime, unlike fgMorphStmts() which should only be called once.
//
bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg))
{
assert(block != nullptr);
assert(stmt != nullptr);
// Reset some ambient state
fgRemoveRestOfBlock = false;
compCurBB = block;
compCurStmt = stmt;
GenTree* morph = fgMorphTree(stmt->GetRootNode());
// Bug 1106830 - During the CSE phase we can't just remove
// morph->AsOp()->gtOp2 as it could contain CSE expressions.
// This leads to a noway_assert in OptCSE.cpp when
// searching for the removed CSE ref. (using gtFindLink)
//
if (!optValnumCSE_phase)
{
// Check for morph as a GT_COMMA with an unconditional throw
if (fgIsCommaThrow(morph, true))
{
#ifdef DEBUG
if (verbose)
{
printf("Folding a top-level fgIsCommaThrow stmt\n");
printf("Removing op2 as unreachable:\n");
gtDispTree(morph->AsOp()->gtOp2);
printf("\n");
}
#endif
// Use the call as the new stmt
morph = morph->AsOp()->gtOp1;
noway_assert(morph->gtOper == GT_CALL);
}
// we can get a throw as a statement root
if (fgIsThrow(morph))
{
#ifdef DEBUG
if (verbose)
{
printf("We have a top-level fgIsThrow stmt\n");
printf("Removing the rest of block as unreachable:\n");
}
#endif
noway_assert((morph->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
}
stmt->SetRootNode(morph);
// Can the entire tree be removed?
bool removedStmt = false;
// Defer removing statements during CSE so we don't inadvertently remove any CSE defs.
if (!optValnumCSE_phase)
{
removedStmt = fgCheckRemoveStmt(block, stmt);
}
// Or this is the last statement of a conditional branch that was just folded?
if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock)
{
FoldResult const fr = fgFoldConditional(block);
removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT);
}
if (!removedStmt)
{
// Have to re-do the evaluation order since for example some later code does not expect constants as op1
gtSetStmtInfo(stmt);
// Have to re-link the nodes for this statement
fgSetStmtSeq(stmt);
}
#ifdef DEBUG
if (verbose)
{
printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed"));
gtDispTree(morph);
printf("\n");
}
#endif
if (fgRemoveRestOfBlock)
{
// Remove the rest of the stmts in the block
for (Statement* removeStmt : StatementList(stmt->GetNextStmt()))
{
fgRemoveStmt(block, removeStmt);
}
// The rest of block has been removed and we will always throw an exception.
//
        // For compDbgCode, we prepend an empty BB as the firstBB; it is BBJ_NONE.
// We should not convert it to a ThrowBB.
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0))
{
// Convert block to a throw bb
fgConvertBBToThrowBB(block);
}
#ifdef DEBUG
if (verbose)
{
printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum);
}
#endif
fgRemoveRestOfBlock = false;
}
return removedStmt;
}
/*****************************************************************************
*
* Morph the statements of the given block.
* This function should be called just once for a block. Use fgMorphBlockStmt()
* for reentrant calls.
*/
void Compiler::fgMorphStmts(BasicBlock* block)
{
fgRemoveRestOfBlock = false;
fgCurrentlyInUseArgTemps = hashBv::Create(this);
for (Statement* const stmt : block->Statements())
{
if (fgRemoveRestOfBlock)
{
fgRemoveStmt(block, stmt);
continue;
}
#ifdef FEATURE_SIMD
if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT &&
stmt->GetRootNode()->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
fgMorphStmt = stmt;
compCurStmt = stmt;
GenTree* oldTree = stmt->GetRootNode();
#ifdef DEBUG
unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0);
if (verbose)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID());
gtDispTree(oldTree);
}
#endif
/* Morph this statement tree */
GenTree* morphedTree = fgMorphTree(oldTree);
// mark any outgoing arg temps as free so we can reuse them in the next statement.
fgCurrentlyInUseArgTemps->ZeroAll();
// Has fgMorphStmt been sneakily changed ?
if ((stmt->GetRootNode() != oldTree) || (block != compCurBB))
{
if (stmt->GetRootNode() != oldTree)
{
/* This must be tailcall. Ignore 'morphedTree' and carry on with
the tail-call node */
morphedTree = stmt->GetRootNode();
}
else
{
/* This must be a tailcall that caused a GCPoll to get
injected. We haven't actually morphed the call yet
but the flag still got set, clear it here... */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
}
noway_assert(compTailCallUsed);
noway_assert(morphedTree->gtOper == GT_CALL);
GenTreeCall* call = morphedTree->AsCall();
// Could be
// - a fast call made as jmp in which case block will be ending with
// BBJ_RETURN (as we need epilog) and marked as containing a jmp.
// - a tailcall dispatched via JIT helper, on x86, in which case
// block will be ending with BBJ_THROW.
// - a tail call dispatched via runtime help (IL stubs), in which
// case there will not be any tailcall and the block will be ending
// with BBJ_RETURN (as normal control flow)
noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) &&
((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) ||
(call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
(!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN)));
}
#ifdef DEBUG
if (compStressCompile(STRESS_CLONE_EXPR, 30))
{
// Clone all the trees to stress gtCloneExpr()
if (verbose)
{
printf("\nfgMorphTree (stressClone from):\n");
gtDispTree(morphedTree);
}
morphedTree = gtCloneExpr(morphedTree);
noway_assert(morphedTree != nullptr);
if (verbose)
{
printf("\nfgMorphTree (stressClone to):\n");
gtDispTree(morphedTree);
}
}
        /* If the hash value changes, we modified the tree during morphing */
if (verbose)
{
unsigned newHash = gtHashValue(morphedTree);
if (newHash != oldHash)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID());
gtDispTree(morphedTree);
}
}
#endif
/* Check for morphedTree as a GT_COMMA with an unconditional throw */
if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true))
{
/* Use the call as the new stmt */
morphedTree = morphedTree->AsOp()->gtOp1;
noway_assert(morphedTree->gtOper == GT_CALL);
noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
stmt->SetRootNode(morphedTree);
if (fgRemoveRestOfBlock)
{
continue;
}
/* Has the statement been optimized away? */
if (fgCheckRemoveStmt(block, stmt))
{
continue;
}
/* Check if this block ends with a conditional branch that can be folded */
if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING)
{
continue;
}
if (ehBlockHasExnFlowDsc(block))
{
continue;
}
}
if (fgRemoveRestOfBlock)
{
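// The rest of the block is unreachable. If the terminating conditional jump or switch
// statement is still present, replace it with just its (re-morphed) operand so any side
// effects are kept, then convert the block to a throw block below.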
if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH))
{
Statement* first = block->firstStmt();
noway_assert(first);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr);
GenTree* last = lastStmt->GetRootNode();
if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) ||
((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH)))
{
GenTree* op1 = last->AsOp()->gtOp1;
if (op1->OperIsCompare())
{
/* Unmark the comparison node with GTF_RELOP_JMP_USED */
op1->gtFlags &= ~GTF_RELOP_JMP_USED;
}
lastStmt->SetRootNode(fgMorphTree(op1));
}
}
/* Mark block as a BBJ_THROW block */
fgConvertBBToThrowBB(block);
}
#if FEATURE_FASTTAILCALL
GenTree* recursiveTailCall = nullptr;
if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall))
{
fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall());
}
#endif
// Reset this back so that it doesn't leak out impacting other blocks
fgRemoveRestOfBlock = false;
}
/*****************************************************************************
*
* Morph the blocks of the method.
* This function should be called just once.
*/
void Compiler::fgMorphBlocks()
{
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgMorphBlocks()\n");
}
#endif
/* Since fgMorphTree can be called after various optimizations to re-arrange
 * the nodes, we need a global flag to signal whether we are in the one-pass
 * global morphing phase */
fgGlobalMorph = true;
//
// Local assertion prop is enabled if we are optimizing
//
optLocalAssertionProp = opts.OptimizationEnabled();
if (optLocalAssertionProp)
{
//
// Initialize for local assertion prop
//
optAssertionInit(true);
}
if (!compEnregLocals())
{
// Morph is checking if lvDoNotEnregister is already set for some optimizations.
// If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`)
// then we already know that we won't enregister any locals and it is better to set
// this flag before we start reading it.
// The main reason why this flag is not set is that we are running in minOpts.
lvSetMinOptsDoNotEnreg();
}
/*-------------------------------------------------------------------------
* Process all basic blocks in the function
*/
BasicBlock* block = fgFirstBB;
noway_assert(block);
do
{
#ifdef DEBUG
if (verbose)
{
printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName);
}
#endif
if (optLocalAssertionProp)
{
//
// Clear out any currently recorded assertion candidates
// before processing each basic block,
// also we must handle QMARK-COLON specially
//
optAssertionReset(0);
}
// Make the current basic block address available globally.
compCurBB = block;
// Process all statement trees in the basic block.
fgMorphStmts(block);
// Do we need to merge the result of this block into a single return block?
if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0))
{
if ((genReturnBB != nullptr) && (genReturnBB != block))
{
fgMergeBlockReturn(block);
}
}
block = block->bbNext;
} while (block != nullptr);
// We are done with the global morphing phase
fgGlobalMorph = false;
compCurBB = nullptr;
// Under OSR, we no longer need to specially protect the original method entry
//
if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED))
{
JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum);
assert(fgEntryBB->bbRefs > 0);
fgEntryBB->bbRefs--;
// We don't need to remember this block anymore.
fgEntryBB = nullptr;
}
#ifdef DEBUG
if (verboseTrees)
{
fgDispBasicBlocks(true);
}
#endif
}
//------------------------------------------------------------------------
// fgMergeBlockReturn: assign the block return value (if any) into the single return temp
// and branch to the single return block.
//
// Arguments:
// block - the block to process.
//
// Notes:
// A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN.
// For example a method returning void could have an empty block with jump kind BBJ_RETURN.
// Such blocks do materialize as part of in-lining.
//
// A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN.
// It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC.
// For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal
// is BAD_VAR_NUM.
//
void Compiler::fgMergeBlockReturn(BasicBlock* block)
{
assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0));
assert((genReturnBB != nullptr) && (genReturnBB != block));
// TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN.
Statement* lastStmt = block->lastStmt();
GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr;
if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0))
{
// This return was generated during epilog merging, so leave it alone
}
else
{
// We'll jump to the genReturnBB.
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_X86)
if (info.compFlags & CORINFO_FLG_SYNCH)
{
fgConvertSyncReturnToLeave(block);
}
else
#endif // !TARGET_X86
{
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = genReturnBB;
fgAddRefPred(genReturnBB, block);
fgReturnCount--;
}
if (genReturnLocal != BAD_VAR_NUM)
{
// replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal.
// Method must be returning a value other than TYP_VOID.
noway_assert(compMethodHasRetVal());
// This block must be ending with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
noway_assert(ret != nullptr);
// GT_RETURN must have non-null operand as the method is returning the value assigned to
// genReturnLocal
noway_assert(ret->OperGet() == GT_RETURN);
noway_assert(ret->gtGetOp1() != nullptr);
Statement* pAfterStatement = lastStmt;
const DebugInfo& di = lastStmt->GetDebugInfo();
GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block);
if (tree->OperIsCopyBlkOp())
{
tree = fgMorphCopyBlock(tree);
}
else if (tree->OperIsInitBlkOp())
{
tree = fgMorphInitBlock(tree);
}
if (pAfterStatement == lastStmt)
{
lastStmt->SetRootNode(tree);
}
else
{
// gtNewTempAssign inserted additional statements after 'lastStmt'
fgRemoveStmt(block, lastStmt);
Statement* newStmt = gtNewStmt(tree, di);
fgInsertStmtAfter(block, pAfterStatement, newStmt);
lastStmt = newStmt;
}
}
else if (ret != nullptr && ret->OperGet() == GT_RETURN)
{
// This block ends with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
// Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn
// block
noway_assert(ret->TypeGet() == TYP_VOID);
noway_assert(ret->gtGetOp1() == nullptr);
fgRemoveStmt(block, lastStmt);
}
JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum);
DISPBLOCK(block);
if (block->hasProfileWeight())
{
weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT;
weight_t const newWeight = oldWeight + block->bbWeight;
JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight,
block->bbNum, genReturnBB->bbNum);
genReturnBB->setBBProfileWeight(newWeight);
DISPBLOCK(genReturnBB);
}
}
}
/*****************************************************************************
*
* Make some decisions about the kind of code to generate.
*/
void Compiler::fgSetOptions()
{
#ifdef DEBUG
/* Should we force fully interruptible code ? */
if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30))
{
noway_assert(!codeGen->isGCTypeFixed());
SetInterruptible(true);
}
#endif
if (opts.compDbgCode)
{
assert(!codeGen->isGCTypeFixed());
SetInterruptible(true); // debugging is easier this way ...
}
/* Assume we won't need an explicit stack frame if this is allowed */
if (compLocallocUsed)
{
codeGen->setFramePointerRequired(true);
}
#ifdef TARGET_X86
if (compTailCallUsed)
codeGen->setFramePointerRequired(true);
#endif // TARGET_X86
if (!opts.genFPopt)
{
codeGen->setFramePointerRequired(true);
}
// Assert that the EH table has been initialized by now. Note that
// compHndBBtabAllocCount never decreases; it is a high-water mark
// of table allocation. In contrast, compHndBBtabCount does shrink
// if we delete a dead EH region, and if it shrinks to zero, the
// table pointer compHndBBtab is unreliable.
assert(compHndBBtabAllocCount >= info.compXcptnsCount);
#ifdef TARGET_X86
// Note: this case, and the !X86 case below, should both use the
// !X86 path. This would require a few more changes for X86 to use
// compHndBBtabCount (the current number of EH clauses) instead of
// info.compXcptnsCount (the number of EH clauses in IL), such as
// in ehNeedsShadowSPslots(). This is because sometimes the IL has
// an EH clause that we delete as statically dead code before we
// get here, leaving no EH clauses left, and thus no requirement
// to use a frame pointer because of EH. But until all the code uses
// the same test, leave info.compXcptnsCount here.
if (info.compXcptnsCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#else // !TARGET_X86
if (compHndBBtabCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#endif // TARGET_X86
#ifdef UNIX_X86_ABI
if (info.compXcptnsCount > 0)
{
assert(!codeGen->isGCTypeFixed());
// Enforce fully interruptible codegen for funclet unwinding
SetInterruptible(true);
}
#endif // UNIX_X86_ABI
if (compMethodRequiresPInvokeFrame())
{
codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
}
if (info.compPublishStubParam)
{
codeGen->setFramePointerRequiredGCInfo(true);
}
if (compIsProfilerHookNeeded())
{
codeGen->setFramePointerRequired(true);
}
if (info.compIsVarArgs)
{
// Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
codeGen->setFramePointerRequiredGCInfo(true);
}
if (lvaReportParamTypeArg())
{
codeGen->setFramePointerRequiredGCInfo(true);
}
// printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}
/*****************************************************************************/
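// fgInitThisClass: Build the tree that triggers static initialization (the class
// constructor) for the class of the method being compiled, using a runtime lookup of the
// generic context when the exact class is not known statically (shared generic code).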
GenTree* Compiler::fgInitThisClass()
{
noway_assert(!compIsForInlining());
CORINFO_LOOKUP_KIND kind;
info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
if (!kind.needsRuntimeLookup)
{
return fgGetSharedCCtor(info.compClassHnd);
}
else
{
#ifdef FEATURE_READYTORUN
// Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
{
CORINFO_RESOLVED_TOKEN resolvedToken;
memset(&resolvedToken, 0, sizeof(resolvedToken));
// We are in a shared method body, but maybe we don't need a runtime lookup after all.
// This covers the case of a generic method on a non-generic type.
if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
{
resolvedToken.hClass = info.compClassHnd;
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
}
// We need a runtime lookup.
GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
// CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
// base of the class that owns the method being compiled". If we're in this method, it means we're not
// inlining and there's no ambiguity.
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
gtNewCallArgs(ctxTree), &kind);
}
#endif
// Collectible types require that for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach; we could detect some cases, particularly when the
// context parameter is 'this', where we don't need the eager reporting logic.)
lvaGenericsContextInUse = true;
switch (kind.runtimeLookupKind)
{
case CORINFO_LOOKUP_THISOBJ:
{
// This code takes a this pointer; but we need to pass the static method desc to get the right point in
// the hierarchy
GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
// Vtable pointer of this object
vtTree = gtNewMethodTableLookup(vtTree);
GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd);
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd));
}
case CORINFO_LOOKUP_CLASSPARAM:
{
GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree));
}
case CORINFO_LOOKUP_METHODPARAM:
{
GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
methHndTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID,
gtNewCallArgs(gtNewIconNode(0), methHndTree));
}
default:
noway_assert(!"Unknown LOOKUP_KIND");
UNREACHABLE();
}
}
}
#ifdef DEBUG
/*****************************************************************************
*
* Tree walk callback to make sure no GT_QMARK nodes are present in the tree.
*/
Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data)
{
if ((*tree)->OperGet() == GT_QMARK)
{
fgCheckQmarkAllowedForm(*tree);
}
return WALK_CONTINUE;
}
void Compiler::fgCheckQmarkAllowedForm(GenTree* tree)
{
assert(tree->OperGet() == GT_QMARK);
assert(!"Qmarks beyond morph disallowed.");
}
/*****************************************************************************
*
* Verify that the importer has created GT_QMARK nodes in a way we can
* process them. The following is allowed:
*
* 1. A top level qmark. Top level qmark is of the form:
* a) (bool) ? (void) : (void) OR
* b) V0N = (bool) ? (type) : (type)
*
* 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child
* of either op1 of colon or op2 of colon but not a child of any other
* operator.
*/
void Compiler::fgPreExpandQmarkChecks(GenTree* expr)
{
GenTree* topQmark = fgGetTopLevelQmark(expr);
// If the top level Qmark is null, then scan the tree to make sure
// there are no qmarks within it.
if (topQmark == nullptr)
{
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
else
{
// We could probably expand the cond node also, but don't think the extra effort is necessary,
// so let's just assert the cond node of a top level qmark doesn't have further top level qmarks.
fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2);
}
}
#endif // DEBUG
/*****************************************************************************
*
* Get the top level GT_QMARK node in a given "expr", return NULL if such a
* node is not present. If the top level GT_QMARK node is assigned to a
* GT_LCL_VAR, then return the lcl node in ppDst.
*
*/
GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
if (ppDst != nullptr)
{
*ppDst = nullptr;
}
GenTree* topQmark = nullptr;
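// Recognize either a naked QMARK at the root, or a QMARK on the RHS of an
// assignment to a local, i.e. ASG(LCL_VAR, QMARK(...)).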
if (expr->gtOper == GT_QMARK)
{
topQmark = expr;
}
else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
topQmark = expr->AsOp()->gtOp2;
if (ppDst != nullptr)
{
*ppDst = expr->AsOp()->gtOp1;
}
}
return topQmark;
}
/*********************************************************************************
*
* For a castclass helper call,
* Importer creates the following tree:
* tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
*
* This method splits the qmark expression created by the importer into the
* following blocks: (block, asg, cond1, cond2, helper, remainder)
* Notice that op1 is the result for both conditions. So we coalesce these
* assignments into a single block instead of two blocks, which would otherwise
* result in a nested diamond.
*
* +---------->-----------+
* | | |
* ^ ^ v
* | | |
* block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
*
* We expect to achieve the following codegen:
* mov rsi, rdx tmp = op1 // asgBlock
* test rsi, rsi goto skip if tmp == null ? // cond1Block
* je SKIP
* mov rcx, 0x76543210 cns = op2 // cond2Block
* cmp qword ptr [rsi], rcx goto skip if *tmp == op2
* je SKIP
* call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock
* mov rsi, rax
* SKIP: // remainderBlock
* tmp has the result.
*
*/
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
GenTree* expr = stmt->GetRootNode();
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
noway_assert(dst != nullptr);
assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);
// Get cond, true, false exprs for the qmark.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
// Get cond, true, false exprs for the nested qmark.
GenTree* nestedQmark = falseExpr;
GenTree* cond2Expr;
GenTree* true2Expr;
GenTree* false2Expr;
if (nestedQmark->gtOper == GT_QMARK)
{
cond2Expr = nestedQmark->gtGetOp1();
true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
}
else
{
// This is a rare case that arises when we are doing minopts and encounter isinst of null:
// gtFoldExpr was still able to optimize away part of the tree (but not all).
// That means it does not match our pattern.
// Rather than write code to handle this case, just fake up some nodes to make it match the common
// case. Synthesize a comparison that is always true, and for the result-on-true, use the
// entire subtree we expected to be the nested question op.
cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
true2Expr = nestedQmark;
false2Expr = gtNewIconNode(0, TYP_I_IMPL);
}
assert(false2Expr->OperGet() == trueExpr->OperGet());
// Create the chain of blocks. See method header comment.
// The order of blocks after this is the following:
// block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
remainderBlock->bbFlags |= propagateFlags;
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
helperBlock->bbFlags &= ~BBF_INTERNAL;
cond2Block->bbFlags &= ~BBF_INTERNAL;
cond1Block->bbFlags &= ~BBF_INTERNAL;
asgBlock->bbFlags &= ~BBF_INTERNAL;
helperBlock->bbFlags |= BBF_IMPORTED;
cond2Block->bbFlags |= BBF_IMPORTED;
cond1Block->bbFlags |= BBF_IMPORTED;
asgBlock->bbFlags |= BBF_IMPORTED;
}
// Chain the flow correctly.
fgAddRefPred(asgBlock, block);
fgAddRefPred(cond1Block, asgBlock);
fgAddRefPred(cond2Block, cond1Block);
fgAddRefPred(helperBlock, cond2Block);
fgAddRefPred(remainderBlock, helperBlock);
fgAddRefPred(remainderBlock, cond1Block);
fgAddRefPred(remainderBlock, cond2Block);
cond1Block->bbJumpDest = remainderBlock;
cond2Block->bbJumpDest = remainderBlock;
// Set the weights; some are guesses.
asgBlock->inheritWeight(block);
cond1Block->inheritWeight(block);
cond2Block->inheritWeightPercentage(cond1Block, 50);
helperBlock->inheritWeightPercentage(cond2Block, 50);
// Append cond1 as JTRUE to cond1Block
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr);
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond1Block, jmpStmt);
// Append cond2 as JTRUE to cond2Block
jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr);
jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond2Block, jmpStmt);
// AsgBlock should get tmp = op1 assignment.
trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(asgBlock, trueStmt);
// Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper.
gtReverseCond(cond2Expr);
GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr);
Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(helperBlock, helperStmt);
// Finally remove the nested qmark stmt.
fgRemoveStmt(block, stmt);
if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN))
{
fgConvertBBToThrowBB(helperBlock);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand a statement with a top level qmark node. There are three cases, based
* on whether the qmark has both "true" and "false" arms, or just one of them.
*
* S0;
* C ? T : F;
* S1;
*
* Generates ===>
*
* bbj_always
* +---->------+
* false | |
* S0 -->-- ~C -->-- T F -->-- S1
* | |
* +--->--------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? T : NOP;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- ~C -->-- T -->-- S1
* | |
* +-->-------------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? NOP : F;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- C -->-- F -->-- S1
* | |
* +-->------------+
* bbj_cond(true)
*
* If the qmark assigns to a variable, then create tmps for "then"
* and "else" results and assign the temp to the variable as a writeback step.
*/
void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
{
GenTree* expr = stmt->GetRootNode();
// Retrieve the Qmark node to be expanded.
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
if (qmark == nullptr)
{
return;
}
if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF)
{
fgExpandQmarkForCastInstOf(block, stmt);
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
// Retrieve the operands.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
assert(!varTypeIsFloating(condExpr->TypeGet()));
bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP);
bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!
// Create remainder, cond and "else" blocks. After this, the blocks are in this order:
// block ... condBlock ... elseBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
condBlock->bbFlags &= ~BBF_INTERNAL;
elseBlock->bbFlags &= ~BBF_INTERNAL;
condBlock->bbFlags |= BBF_IMPORTED;
elseBlock->bbFlags |= BBF_IMPORTED;
}
remainderBlock->bbFlags |= propagateFlags;
condBlock->inheritWeight(block);
fgAddRefPred(condBlock, block);
fgAddRefPred(elseBlock, condBlock);
fgAddRefPred(remainderBlock, elseBlock);
BasicBlock* thenBlock = nullptr;
if (hasTrueExpr && hasFalseExpr)
{
// bbj_always
// +---->------+
// false | |
// S0 -->-- ~C -->-- T F -->-- S1
// | |
// +--->--------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = elseBlock;
thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
thenBlock->bbJumpDest = remainderBlock;
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
thenBlock->bbFlags &= ~BBF_INTERNAL;
thenBlock->bbFlags |= BBF_IMPORTED;
}
fgAddRefPred(thenBlock, condBlock);
fgAddRefPred(remainderBlock, thenBlock);
thenBlock->inheritWeightPercentage(condBlock, 50);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasTrueExpr)
{
// false
// S0 -->-- ~C -->-- T -->-- S1
// | |
// +-->-------------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
// Since we have no false expr, use the one we'd already created.
thenBlock = elseBlock;
elseBlock = nullptr;
thenBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasFalseExpr)
{
// false
// S0 -->-- C -->-- F -->-- S1
// | |
// +-->------------+
// bbj_cond(true)
//
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1());
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(condBlock, jmpStmt);
// Remove the original qmark statement.
fgRemoveStmt(block, stmt);
// Since we have a top level qmark, either we have a dst local, in which case we assign
// the "then" and "else" results to it in their respective blocks, or the qmark is
// TYP_VOID and there is nothing to assign.
unsigned lclNum = BAD_VAR_NUM;
if (dst != nullptr)
{
assert(dst->gtOper == GT_LCL_VAR);
lclNum = dst->AsLclVar()->GetLclNum();
}
else
{
assert(qmark->TypeGet() == TYP_VOID);
}
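// Assign the trueExpr into the dst or tmp, insert in thenBlock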
if (hasTrueExpr)
{
if (dst != nullptr)
{
trueExpr = gtNewTempAssign(lclNum, trueExpr);
}
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(thenBlock, trueStmt);
}
// Assign the falseExpr into the dst or tmp, insert in elseBlock
if (hasFalseExpr)
{
if (dst != nullptr)
{
falseExpr = gtNewTempAssign(lclNum, falseExpr);
}
Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(elseBlock, falseStmt);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand GT_QMARK nodes from the flow graph into basic blocks.
*
*/
void Compiler::fgExpandQmarkNodes()
{
if (compQmarkUsed)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
#ifdef DEBUG
fgPreExpandQmarkChecks(expr);
#endif
fgExpandQmarkStmt(block, stmt);
}
}
#ifdef DEBUG
fgPostExpandQmarkChecks();
#endif
}
compQmarkRationalized = true;
}
#ifdef DEBUG
/*****************************************************************************
*
* Make sure we don't have any more GT_QMARK nodes.
*
*/
void Compiler::fgPostExpandQmarkChecks()
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
}
}
#endif
/*****************************************************************************
*
* Promoting struct locals
*/
void Compiler::fgPromoteStructs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In fgPromoteStructs()\n");
}
#endif // DEBUG
if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE))
{
JITDUMP(" promotion opt flag not enabled\n");
return;
}
if (fgNoStructPromotion)
{
JITDUMP(" promotion disabled by JitNoStructPromotion\n");
return;
}
#if 0
// The code in this #if has been useful in debugging struct promotion issues, by
// allowing selective enablement of the struct promotion optimization according to
// method hash.
#ifdef DEBUG
unsigned methHash = info.compMethodHash();
char* lostr = getenv("structpromohashlo");
unsigned methHashLo = 0;
if (lostr != NULL)
{
sscanf_s(lostr, "%x", &methHashLo);
}
char* histr = getenv("structpromohashhi");
unsigned methHashHi = UINT32_MAX;
if (histr != NULL)
{
sscanf_s(histr, "%x", &methHashHi);
}
if (methHash < methHashLo || methHash > methHashHi)
{
return;
}
else
{
printf("Promoting structs for method %s, hash = 0x%x.\n",
info.compFullName, info.compMethodHash());
printf(""); // in our logic this causes a flush
}
#endif // DEBUG
#endif // 0
if (info.compIsVarArgs)
{
JITDUMP(" promotion disabled because of varargs\n");
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable before fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
// The lvaTable might grow as we grab temps. Make a local copy here.
unsigned startLvaCount = lvaCount;
//
// Loop through the original lvaTable. Looking for struct locals to be promoted.
//
lvaStructPromotionInfo structPromotionInfo;
bool tooManyLocalsReported = false;
// Clear the structPromotionHelper, since it is used during inlining, at which point it
// may be conservative about looking up SIMD info.
// We don't want to preserve those conservative decisions for the actual struct promotion.
structPromotionHelper->Clear();
for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++)
{
// Whether this var got promoted
bool promotedVar = false;
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote
// its fields. Instead, we will attempt to enregister the entire struct.
if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc)))
{
varDsc->lvRegStruct = true;
}
// Don't promote if we have reached the tracking limit.
else if (lvaHaveManyLocals())
{
// Print the message first time when we detected this condition
if (!tooManyLocalsReported)
{
JITDUMP("Stopped promoting struct fields, due to too many locals.\n");
}
tooManyLocalsReported = true;
}
else if (varTypeIsStruct(varDsc))
{
assert(structPromotionHelper != nullptr);
promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum);
}
if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed)
{
// Even if we have not used this in a SIMD intrinsic, if it is not being promoted,
// we will treat it as a reg struct.
varDsc->lvRegStruct = true;
}
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable after fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
}
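//------------------------------------------------------------------------
// fgMorphStructField: Attempt to rewrite a GT_FIELD whose object is the address of a
//    local variable as a direct GT_LCL_VAR reference: for a promoted struct this is the
//    matching promoted field local, and for a "normed" struct it is the local itself.
//
// Arguments:
//    tree   - the GT_FIELD node to examine
//    parent - the parent node of 'tree'
//
// Notes:
//    The tree is rewritten in place when a matching local of a compatible type exists;
//    otherwise it is left unchanged.
//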
void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_FIELD);
GenTreeField* field = tree->AsField();
GenTree* objRef = field->GetFldObj();
GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr;
noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)));
/* Is this an instance data member? */
if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))
{
unsigned lclNum = obj->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(obj))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = field->gtFldOffset;
unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
if (fieldLclIndex == BAD_VAR_NUM)
{
// Access a promoted struct's field with an offset that doesn't correspond to any field.
// It can happen if the struct was cast to another struct with different offsets.
return;
}
const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex);
var_types fieldType = fieldDsc->TypeGet();
assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type.
if (tree->TypeGet() != fieldType)
{
if (tree->TypeGet() != TYP_STRUCT)
{
// This is going to be an incorrect instruction promotion.
// For example when we try to read int as long.
return;
}
if (field->gtFldHnd != fieldDsc->lvFieldHnd)
{
CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr;
CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass);
CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass);
if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass)
{
// Access the promoted field with a different class handle, can't check that types match.
return;
}
// Access the promoted field as a field of a non-promoted struct with the same class handle.
}
else
{
// As we already checked this above, we must have a tree with a TYP_STRUCT type
//
assert(tree->TypeGet() == TYP_STRUCT);
// The field tree accesses it as a struct, but the promoted LCL_VAR field
// says that it has another type. This happens when struct promotion unwraps
// a single field struct to get to its ultimate type.
//
// Note that currently, we cannot have a promoted LCL_VAR field with a struct type.
//
// This mismatch in types can lead to problems for some parent node type like GT_RETURN.
// So we check the parent node and only allow this optimization when we have
// a GT_ADDR or a GT_ASG.
//
// Note that for a GT_ASG we have to do some additional work,
// see below after the SetOper(GT_LCL_VAR)
//
if (!parent->OperIs(GT_ADDR, GT_ASG))
{
// Don't transform other operations such as GT_RETURN
//
return;
}
#ifdef DEBUG
// This is an additional DEBUG-only sanity check
//
assert(structPromotionHelper != nullptr);
structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType);
#endif // DEBUG
}
}
tree->SetOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(fieldLclIndex);
tree->gtType = fieldType;
tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`.
if (parent->gtOper == GT_ASG)
{
// If we are changing the left side of an assignment, we need to set
// these two flags:
//
if (parent->AsOp()->gtOp1 == tree)
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
// Promotion of struct containing struct fields where the field
// is a struct with a single pointer sized scalar type field: in
// this case struct promotion uses the type of the underlying
// scalar field as the type of struct field instead of recursively
// promoting. This can lead to a case where we have a block-asgn
// with its RHS replaced with a scalar type. Mark RHS value as
// DONT_CSE so that assertion prop will not do const propagation.
// The reason this is required is that if RHS of a block-asg is a
// constant, then it is interpreted as init-block incorrectly.
//
// TODO - This can also be avoided if we implement recursive struct
// promotion, tracked by #10019.
if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree))
{
tree->gtFlags |= GTF_DONT_CSE;
}
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex);
}
#endif // DEBUG
}
}
else
{
// Normed struct
// A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if
// the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8
// bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However,
// there is one extremely rare case where that won't be true. An enum type is a special value type
// that contains exactly one element of a primitive integer type (that, for CLS programs is named
// "value__"). The VM tells us that a local var of that enum type is the primitive type of the
// enum's single field. It turns out that it is legal for IL to access this field using ldflda or
// ldfld. For example:
//
// .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum
// {
// .field public specialname rtspecialname int16 value__
// .field public static literal valuetype mynamespace.e_t one = int16(0x0000)
// }
// .method public hidebysig static void Main() cil managed
// {
// .locals init (valuetype mynamespace.e_t V_0)
// ...
// ldloca.s V_0
// ldflda int16 mynamespace.e_t::value__
// ...
// }
//
// Normally, compilers will not generate the ldflda, since it is superfluous.
//
// In the example, the lclVar is short, but the JIT promotes all trees using this local to the
// "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type
// mismatch like this, don't do this morphing. The local var may end up getting marked as
// address taken, and the appropriate SHORT load will be done from memory in that case.
if (tree->TypeGet() == obj->TypeGet())
{
tree->ChangeOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree->gtFlags &= GTF_NODE_MASK;
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in normed struct with local var V%02u\n", lclNum);
}
#endif // DEBUG
}
}
}
}
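//------------------------------------------------------------------------
// fgMorphLocalField: Attempt to rewrite a GT_LCL_FLD of a promoted (or SIMD) struct
//    local as a GT_LCL_VAR of the matching promoted field local, or of the SIMD local
//    itself when the field covers the whole vector.
//
// Arguments:
//    tree   - the GT_LCL_FLD node to examine
//    parent - the parent node of 'tree'
//
// Notes:
//    If no suitable field local exists, the struct local is marked as
//    do-not-enregister so that it stays in memory.
//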
void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_LCL_FLD);
unsigned lclNum = tree->AsLclFld()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(varDsc))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = tree->AsLclFld()->GetLclOffs();
unsigned fieldLclIndex = 0;
LclVarDsc* fldVarDsc = nullptr;
if (fldOffset != BAD_VAR_NUM)
{
fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
noway_assert(fieldLclIndex != BAD_VAR_NUM);
fldVarDsc = lvaGetDesc(fieldLclIndex);
}
var_types treeType = tree->TypeGet();
var_types fieldType = fldVarDsc->TypeGet();
if (fldOffset != BAD_VAR_NUM &&
((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1)))
{
// There is an existing sub-field we can use.
tree->AsLclFld()->SetLclNum(fieldLclIndex);
// The field must be an enregisterable type; otherwise it would not be a promoted field.
// The tree type may not match, e.g. for return types that have been morphed, but both
// must be enregisterable types.
assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType));
tree->ChangeOper(GT_LCL_VAR);
assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex);
tree->gtType = fldVarDsc->TypeGet();
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex);
}
else
{
// There is no existing field that has all the parts that we need
// So we must ensure that the struct lives in memory.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
#ifdef DEBUG
// We can't convert this local to a float because it really does have its
// address taken.
varDsc->lvKeepType = 1;
#endif // DEBUG
}
}
else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc)))
{
assert(tree->AsLclFld()->GetLclOffs() == 0);
tree->gtType = varDsc->TypeGet();
tree->ChangeOper(GT_LCL_VAR);
JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum);
}
}
}
//------------------------------------------------------------------------
// fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs
void Compiler::fgResetImplicitByRefRefCount()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgResetImplicitByRefRefCount()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsImplicitByRef)
{
// Clear the ref count field; fgMarkAddressTakenLocals will increment it per
// appearance of implicit-by-ref param so that call arg morphing can do an
// optimization for single-use implicit-by-ref params whose single use is as
// an outgoing call argument.
varDsc->setLvRefCnt(0, RCS_EARLY);
varDsc->setLvRefCntWtd(0, RCS_EARLY);
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
//------------------------------------------------------------------------
// fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from
// struct to pointer). Also choose (based on address-exposed analysis)
// which struct promotions of implicit byrefs to keep or discard.
// For those which are kept, insert the appropriate initialization code.
// For those which are to be discarded, annotate the promoted field locals
// so that fgMorphImplicitByRefArgs will know to rewrite their appearances
// using indirections off the pointer parameters.
void Compiler::fgRetypeImplicitByRefArgs()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgRetypeImplicitByRefArgs()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
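// Compute the size of the struct; this is used below to build the block layout for
// the initialization of a kept promotion temp.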
unsigned size;
if (varDsc->lvSize() > REGSIZE_BYTES)
{
size = varDsc->lvSize();
}
else
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
size = info.compCompHnd->getClassSize(typeHnd);
}
if (varDsc->lvPromoted)
{
// This implicit-by-ref was promoted; create a new temp to represent the
// promoted struct before rewriting this parameter as a pointer.
unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref"));
lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true);
if (info.compIsVarArgs)
{
lvaSetStructUsedAsVarArg(newLclNum);
}
// Update varDsc since lvaGrabTemp might have re-allocated the var dsc array.
varDsc = lvaGetDesc(lclNum);
// Copy the struct promotion annotations to the new temp.
LclVarDsc* newVarDsc = lvaGetDesc(newLclNum);
newVarDsc->lvPromoted = true;
newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart;
newVarDsc->lvFieldCnt = varDsc->lvFieldCnt;
newVarDsc->lvContainsHoles = varDsc->lvContainsHoles;
newVarDsc->lvCustomLayout = varDsc->lvCustomLayout;
#ifdef DEBUG
newVarDsc->lvKeepType = true;
#endif // DEBUG
// Propagate address-taken-ness and do-not-enregister-ness.
newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason()));
newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister;
newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
newVarDsc->lvSingleDef = varDsc->lvSingleDef;
newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate;
newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef;
#ifdef DEBUG
newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason());
#endif // DEBUG
// If the promotion is dependent, the promoted temp would just be committed
// to memory anyway, so we'll rewrite its appearances to be indirections
// through the pointer parameter, the same as we'd do for this
// parameter if it weren't promoted at all (otherwise the initialization
// of the new temp would just be a needless memcpy at method entry).
//
// Otherwise, see how many appearances there are. We keep two early ref counts: total
// number of references to the struct or some field, and how many of these are
// arguments to calls. We undo promotion unless we see enough non-call uses.
//
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
const unsigned nonCallAppearances = totalAppearances - callAppearances;
bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ||
(nonCallAppearances <= varDsc->lvFieldCnt));
#ifdef DEBUG
// Above is a profitability heuristic; either value of
// undoPromotion should lead to correct code. So,
// under stress, make different decisions at times.
if (compStressCompile(STRESS_BYREF_PROMOTION, 25))
{
undoPromotion = !undoPromotion;
JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum,
undoPromotion ? "" : "NOT");
}
#endif // DEBUG
JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n",
undoPromotion ? "Undoing" : "Keeping", lclNum,
(lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "",
totalAppearances, nonCallAppearances, varDsc->lvFieldCnt);
if (!undoPromotion)
{
// Insert IR that initializes the temp from the parameter.
// LHS is a simple reference to the temp.
fgEnsureFirstBBisScratch();
GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType);
// RHS is an indirection (using GT_OBJ) off the parameter.
GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF);
GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size));
GenTree* assign = gtNewAssignNode(lhs, rhs);
fgNewStmtAtBeg(fgFirstBB, assign);
}
// Update the locals corresponding to the promoted fields.
unsigned fieldLclStart = varDsc->lvFieldLclStart;
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
if (undoPromotion)
{
// Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs
// will know to rewrite appearances of this local.
assert(fieldVarDsc->lvParentLcl == lclNum);
}
else
{
// Set the new parent.
fieldVarDsc->lvParentLcl = newLclNum;
}
fieldVarDsc->lvIsParam = false;
// The fields shouldn't inherit any register preferences from
// the parameter which is really a pointer to the struct.
fieldVarDsc->lvIsRegArg = false;
fieldVarDsc->lvIsMultiRegArg = false;
fieldVarDsc->SetArgReg(REG_NA);
#if FEATURE_MULTIREG_ARGS
fieldVarDsc->SetOtherArgReg(REG_NA);
#endif
}
// Hijack lvFieldLclStart to record the new temp number.
// It will get fixed up in fgMarkDemotedImplicitByRefArgs.
varDsc->lvFieldLclStart = newLclNum;
// Go ahead and clear lvFieldCnt -- either we're promoting
// a replacement temp or we're not promoting this arg, and
// in either case the parameter is now a pointer that doesn't
// have these fields.
varDsc->lvFieldCnt = 0;
// Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs
// whether references to the struct should be rewritten as
// indirections off the pointer (not promoted) or references
// to the new struct local (promoted).
varDsc->lvPromoted = !undoPromotion;
}
else
{
// The "undo promotion" path above clears lvPromoted for args that struct
// promotion wanted to promote but that aren't considered profitable to
// rewrite. It hijacks lvFieldLclStart to communicate to
// fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left
// on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't
// have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs
// and fgMarkDemotedImplicitByRefArgs.
assert(varDsc->lvFieldLclStart == 0);
}
// Since the parameter in this position is really a pointer, its type is TYP_BYREF.
varDsc->lvType = TYP_BYREF;
// Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF
// make sure that the following flag is not set as these will force SSA to
// exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa)
//
varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it.
// The struct parameter may have had its address taken, but the pointer parameter
// cannot -- any uses of the struct parameter's address are uses of the pointer
// parameter's value, and there's no way for the MSIL to reference the pointer
// parameter's address. So clear the address-taken bit for the parameter.
varDsc->CleanAddressExposed();
varDsc->lvDoNotEnregister = 0;
#ifdef DEBUG
// This should not be converted to a double in stress mode,
// because it is really a pointer
varDsc->lvKeepType = 1;
if (verbose)
{
printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum);
}
#endif // DEBUG
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
//------------------------------------------------------------------------
// fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion
// asked to promote. Appearances of these have now been rewritten
// (by fgMorphImplicitByRefArgs) using indirections from the pointer
// parameter or references to the promotion temp, as appropriate.
void Compiler::fgMarkDemotedImplicitByRefArgs()
{
JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n");
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
JITDUMP("Clearing annotation for V%02d\n", lclNum);
if (varDsc->lvPromoted)
{
// The parameter is simply a pointer now, so clear lvPromoted. It was left set
// by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that
// appearances of this arg needed to be rewritten to a new promoted struct local.
varDsc->lvPromoted = false;
// Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs
// to tell fgMorphImplicitByRefArgs which local is the new promoted struct one.
varDsc->lvFieldLclStart = 0;
}
else if (varDsc->lvFieldLclStart != 0)
{
// We created new temps to represent a promoted struct corresponding to this
// parameter, but decided not to go through with the promotion and have
// rewritten all uses as indirections off the pointer parameter.
// We stashed the pointer to the new struct temp in lvFieldLclStart; make
// note of that and clear the annotation.
unsigned structLclNum = varDsc->lvFieldLclStart;
varDsc->lvFieldLclStart = 0;
// The temp struct is now unused; set flags appropriately so that we
// won't allocate space for it on the stack.
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum);
structVarDsc->CleanAddressExposed();
#ifdef DEBUG
structVarDsc->lvUnusedStruct = true;
structVarDsc->lvUndoneStructPromotion = true;
#endif // DEBUG
unsigned fieldLclStart = structVarDsc->lvFieldLclStart;
unsigned fieldCount = structVarDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum);
// Fix the pointer to the parent local.
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
assert(fieldVarDsc->lvParentLcl == lclNum);
fieldVarDsc->lvParentLcl = structLclNum;
// The field local is now unused; set flags appropriately so that
// we won't allocate stack space for it.
fieldVarDsc->CleanAddressExposed();
}
}
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
/*****************************************************************************
*
*  Morph irregular parameters;
*    for x64 and ARM64 this means turning them into byrefs, adding extra indirs.
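*  Returns true if the given tree (or one of its immediate children) was rewritten.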
*/
bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
{
#if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64)
return false;
#else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
bool changed = false;
// Implicit byref morphing needs to know if the reference to the parameter is a
// child of GT_ADDR or not, so this method looks one level down and does the
// rewrite whenever a child is a reference to an implicit byref parameter.
if (tree->gtOper == GT_ADDR)
{
if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true);
changed = (morphedTree != nullptr);
assert(!changed || (morphedTree == tree));
}
}
else
{
for (GenTree** pTree : tree->UseEdges())
{
GenTree** pTreeCopy = pTree;
GenTree* childTree = *pTree;
if (childTree->gtOper == GT_LCL_VAR)
{
GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false);
if (newChildTree != nullptr)
{
changed = true;
*pTreeCopy = newChildTree;
}
}
}
}
return changed;
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
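//------------------------------------------------------------------------
// fgMorphImplicitByRefArgs: Rewrite a single appearance of an implicit-by-ref parameter.
//    References to a parameter whose promotion was kept are redirected to the new
//    promoted struct local; other references (including dependently promoted field
//    references) are rewritten to go through the pointer parameter, either directly for
//    GT_ADDR uses or via an OBJ/FIELD indirection for value uses.
//
// Arguments:
//    tree   - the GT_LCL_VAR, or GT_ADDR of a GT_LCL_VAR, to rewrite
//    isAddr - true if 'tree' is the GT_ADDR node
//
// Return Value:
//    The updated tree, or nullptr if no rewrite was needed.
//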
GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr)
{
assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)));
assert(isAddr == (tree->gtOper == GT_ADDR));
GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree;
unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum();
LclVarDsc* lclVarDsc = lvaGetDesc(lclNum);
CORINFO_FIELD_HANDLE fieldHnd;
unsigned fieldOffset = 0;
var_types fieldRefType = TYP_UNKNOWN;
if (lvaIsImplicitByRefLocal(lclNum))
{
// The SIMD transformation to coalesce contiguous references to SIMD vector fields will
// re-invoke the traversal to mark address-taken locals.
// So, we may encounter a tree that has already been transformed to TYP_BYREF.
// If we do, leave it as-is.
if (!varTypeIsStruct(lclVarTree))
{
assert(lclVarTree->TypeGet() == TYP_BYREF);
return nullptr;
}
else if (lclVarDsc->lvPromoted)
{
// fgRetypeImplicitByRefArgs created a new promoted struct local to represent this
// arg. Rewrite this to refer to the new local.
assert(lclVarDsc->lvFieldLclStart != 0);
lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart);
return tree;
}
fieldHnd = nullptr;
}
else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl))
{
// This was a field reference to an implicit-by-reference struct parameter that was
// dependently promoted; update it to a field reference off the pointer.
// Grab the field handle from the struct field lclVar.
fieldHnd = lclVarDsc->lvFieldHnd;
fieldOffset = lclVarDsc->lvFldOffset;
assert(fieldHnd != nullptr);
// Update lclNum/lclVarDsc to refer to the parameter
lclNum = lclVarDsc->lvParentLcl;
lclVarDsc = lvaGetDesc(lclNum);
fieldRefType = lclVarTree->TypeGet();
}
else
{
// We only need to transform the 'marked' implicit by ref parameters
return nullptr;
}
// This is no longer a def of the lclVar, even if it WAS a def of the struct.
lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK);
if (isAddr)
{
if (fieldHnd == nullptr)
{
// change &X into just plain X
tree->ReplaceWith(lclVarTree, this);
tree->gtType = TYP_BYREF;
}
else
{
// change &(X.f) [i.e. GT_ADDR of local for promoted arg field]
// into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param]
lclVarTree->AsLclVarCommon()->SetLclNum(lclNum);
lclVarTree->gtType = TYP_BYREF;
tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset);
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing address of implicit by ref struct parameter with byref:\n");
}
#endif // DEBUG
}
else
{
// Change X into OBJ(X) or FIELD(X, f)
var_types structType = tree->gtType;
tree->gtType = TYP_BYREF;
if (fieldHnd)
{
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset);
}
else
{
tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree);
if (structType == TYP_STRUCT)
{
gtSetObjGcInfo(tree->AsObj());
}
}
// TODO-CQ: If the VM ever stops violating the ABI and passing heap references
// we could remove TGTANYWHERE
tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE);
#ifdef DEBUG
if (verbose)
{
printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n");
}
#endif // DEBUG
}
#ifdef DEBUG
if (verbose)
{
gtDispTree(tree);
}
#endif // DEBUG
return tree;
}
//------------------------------------------------------------------------
// fgAddFieldSeqForZeroOffset:
// Associate a fieldSeq (with a zero offset) with the GenTree node 'addr'
//
// Arguments:
// addr - A GenTree node
// fieldSeqZero - a fieldSeq (with a zero offset)
//
// Notes:
// Some GenTree nodes have internal fields that record the field sequence.
// If we have one of these nodes (GT_CNS_INT, GT_LCL_FLD),
// we can append the field sequence using their gtFieldSeq field.
// If we have a GT_ADD with a GT_CNS_INT operand, we can use the
// fieldSeq from that child node.
// Otherwise we record 'fieldSeqZero' for the GenTree node in the
// zero-offset field map: GetZeroOffsetFieldMap().
// When doing so we take care to preserve any existing zero-offset field sequence.
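//
// For example (a rough sketch): if 'addr' is ADD(objRef, CNS_INT) and the constant
// already carries field sequence [a], then after this call the constant carries the
// combined sequence [a, fieldSeqZero].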
//
void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero)
{
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Tunnel through any commas.
const bool commaOnly = true;
addr = addr->gtEffectiveVal(commaOnly);
// We still expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
FieldSeqNode* fieldSeqUpdate = fieldSeqZero;
GenTree* fieldSeqNode = addr;
bool fieldSeqRecorded = false;
#ifdef DEBUG
if (verbose)
{
printf("\nfgAddFieldSeqForZeroOffset for");
gtDispAnyFieldSeq(fieldSeqZero);
printf("\naddr (Before)\n");
gtDispNode(addr, nullptr, nullptr, false);
gtDispCommonEndLine(addr);
}
#endif // DEBUG
switch (addr->OperGet())
{
case GT_CNS_INT:
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
break;
case GT_ADDR:
if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD)
{
fieldSeqNode = addr->AsOp()->gtOp1;
GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld();
fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero);
lclFld->SetFieldSeq(fieldSeqUpdate);
fieldSeqRecorded = true;
}
break;
case GT_ADD:
if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp1;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp2;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
break;
default:
break;
}
if (fieldSeqRecorded == false)
{
// Record in the general zero-offset map.
// The "addr" node might already be annotated with a zero-offset field sequence.
FieldSeqNode* existingFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq))
{
// Append the zero field sequences
fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero);
}
// Overwrite the field sequence annotation for op1
GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite);
fieldSeqRecorded = true;
}
#ifdef DEBUG
if (verbose)
{
printf(" (After)\n");
gtDispNode(fieldSeqNode, nullptr, nullptr, false);
gtDispCommonEndLine(fieldSeqNode);
}
#endif // DEBUG
}
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
// If the RHS of the input stmt is a read of the SIMD vector's X field, then this function
// will keep reading the next few stmts based on the vector size (2, 3, 4).
// If the LHSs of those stmts are located contiguously, and the RHSs are also located
// contiguously, then we replace those statements with a single copyblk.
//
// Arguments:
// block - BasicBlock*. The block the stmt belongs to.
// stmt - Statement*. The stmt node we want to check.
//
// Return Value:
// true if this function successfully optimized the stmts; otherwise false.
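//
// For example (a sketch, assuming a Vector3 'v' and a float array 'a'):
// the sequence a[0] = v.X; a[1] = v.Y; a[2] = v.Z;
// can be replaced with a single 12-byte block copy from 'v'.
//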
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
GenTree* originalLHS = tree->AsOp()->gtOp1;
GenTree* prevLHS = tree->AsOp()->gtOp1;
GenTree* prevRHS = tree->AsOp()->gtOp2;
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
GenTree* exp = curStmt->GetRootNode();
if (exp->OperGet() != GT_ASG)
{
break;
}
GenTree* curLHS = exp->gtGetOp1();
GenTree* curRHS = exp->gtGetOp2();
if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
remainingAssignments--;
prevLHS = curLHS;
prevRHS = curRHS;
lastStmt = curStmt;
curStmt = curStmt->GetNextStmt();
}
if (remainingAssignments > 0)
{
// If the number of remaining assignments is greater than zero, then the
// assignments are not assigning to contiguous memory locations from the
// same vector.
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\nFound contiguous assignments from a SIMD vector to memory.\n");
printf("From " FMT_BB ", stmt ", block->bbNum);
printStmtID(stmt);
printf(" to stmt");
printStmtID(lastStmt);
printf("\n");
}
#endif
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->GetNextStmt());
}
GenTree* dstNode;
if (originalLHS->OperIs(GT_LCL_FLD))
{
dstNode = originalLHS;
dstNode->gtType = simdType;
dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField());
// This may have changed a partial local field into full local field
if (dstNode->IsPartialLclFld(this))
{
dstNode->gtFlags |= GTF_VAR_USEASG;
}
else
{
dstNode->gtFlags &= ~GTF_VAR_USEASG;
}
}
else
{
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
if (simdStructNode->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(simdStructNode);
}
GenTree* copyBlkAddr = copyBlkDst;
if (copyBlkAddr->gtOper == GT_LEA)
{
copyBlkAddr = copyBlkAddr->AsAddrMode()->Base();
}
GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr();
if (localDst != nullptr)
{
setLclRelatedToSIMDIntrinsic(localDst);
}
if (simdStructNode->TypeGet() == TYP_BYREF)
{
assert(simdStructNode->OperIsLocal());
assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum()));
simdStructNode = gtNewIndir(simdType, simdStructNode);
}
else
{
assert(varTypeIsSIMD(simdStructNode));
}
dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
}
#ifdef DEBUG
if (verbose)
{
printf("\n" FMT_BB " stmt ", block->bbNum);
printStmtID(stmt);
printf("(before)\n");
gtDispStmt(stmt);
}
#endif
assert(!simdStructNode->CanCSE());
simdStructNode->ClearDoNotCSE();
tree = gtNewAssignNode(dstNode, simdStructNode);
stmt->SetRootNode(tree);
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all
// local field access into LCL_FLDs, at that point we would be
// combining 2 existing LCL_FLDs or 2 FIELDs that do not reference
// a local and thus cannot result in a new address exposed local.
fgMarkAddressExposedLocals(stmt);
#ifdef DEBUG
if (verbose)
{
printf("\nReplaced " FMT_BB " stmt", block->bbNum);
printStmtID(stmt);
printf("(after)\n");
gtDispStmt(stmt);
}
#endif
return true;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// fgCheckStmtAfterTailCall: check that statements after the tail call stmt
// candidate are in one of the expected forms, which are described below.
//
// Return Value:
// 'true' if stmts are in the expected form, else 'false'.
//
bool Compiler::fgCheckStmtAfterTailCall()
{
// For void calls, we would have created a GT_CALL in the stmt list.
// For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
// For calls returning structs, we would have a void call, followed by a void return.
// For debuggable code, it would be an assignment of the call to a temp.
// We want to get rid of any of these extra trees, and just leave
// the call.
Statement* callStmt = fgMorphStmt;
Statement* nextMorphStmt = callStmt->GetNextStmt();
// Check that the remaining stmts in the block are in one of the following patterns:
// 1) ret(void)
// 2) ret(cast*(callResultLclVar))
// 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block
// 4) nop
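// For example, a tail call returning a small int may be followed by
// RET(CAST(callResultLclVar)), matching pattern 2; a void call is simply
// followed by RET(void), matching pattern 1.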
if (nextMorphStmt != nullptr)
{
GenTree* callExpr = callStmt->GetRootNode();
if (callExpr->gtOper != GT_ASG)
{
// The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar),
// where lclVar was return buffer in the call for structs or simd.
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = retStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
nextMorphStmt = retStmt->GetNextStmt();
}
else
{
noway_assert(callExpr->gtGetOp1()->OperIsLocal());
unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// We can have a chain of assignments from the call result to
// various inline return spill temps. These are ok as long
// as the last one ultimately provides the return value or is ignored.
//
// And if we're returning a small type we may see a cast
// on the source side.
while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP)))
{
if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP))
{
nextMorphStmt = nextMorphStmt->GetNextStmt();
continue;
}
Statement* moveStmt = nextMorphStmt;
GenTree* moveExpr = nextMorphStmt->GetRootNode();
GenTree* moveDest = moveExpr->gtGetOp1();
noway_assert(moveDest->OperIsLocal());
// Tunnel through any casts on the source side.
GenTree* moveSource = moveExpr->gtGetOp2();
while (moveSource->OperIs(GT_CAST))
{
noway_assert(!moveSource->gtOverflow());
moveSource = moveSource->gtGetOp1();
}
noway_assert(moveSource->OperIsLocal());
// Verify we're just passing the value from one local to another
// along the chain.
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum();
noway_assert(srcLclNum == callResultLclNumber);
const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum();
callResultLclNumber = dstLclNum;
nextMorphStmt = moveStmt->GetNextStmt();
}
if (nextMorphStmt != nullptr)
#endif
{
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = nextMorphStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
GenTree* treeWithLcl = retExpr->gtGetOp1();
while (treeWithLcl->gtOper == GT_CAST)
{
noway_assert(!treeWithLcl->gtOverflow());
treeWithLcl = treeWithLcl->gtGetOp1();
}
noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum());
nextMorphStmt = retStmt->GetNextStmt();
}
}
}
return nextMorphStmt == nullptr;
}
//------------------------------------------------------------------------
// fgCanTailCallViaJitHelper: check whether we can use the faster tailcall
// JIT helper on x86.
//
// Return Value:
// 'true' if we can; or 'false' if we should use the generic tailcall mechanism.
//
bool Compiler::fgCanTailCallViaJitHelper()
{
#if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN)
// On anything except windows X86 we have no faster mechanism available.
return false;
#else
// The JIT helper does not properly handle the case where localloc was used.
if (compLocallocUsed)
return false;
return true;
#endif
}
//------------------------------------------------------------------------
// fgMorphReduceAddOps: reduce successive variable adds into a single multiply,
// e.g., i + i + i + i => i * 4.
//
// Arguments:
// tree - tree for reduction
//
// Return Value:
// reduced tree if pattern matches, original tree otherwise
//
GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree)
{
// ADD(_, V0) starts the pattern match.
if (!tree->OperIs(GT_ADD) || tree->gtOverflow())
{
return tree;
}
#ifndef TARGET_64BIT
// Transforming a 64-bit ADD to a 64-bit MUL on a 32-bit system results in replacing
// the ADD ops with a helper function call. Don't apply the optimization in that case.
if (tree->TypeGet() == TYP_LONG)
{
return tree;
}
#endif
GenTree* lclVarTree = tree->AsOp()->gtOp2;
GenTree* consTree = tree->AsOp()->gtOp1;
GenTree* op1 = consTree;
GenTree* op2 = lclVarTree;
if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2))
{
return tree;
}
int foldCount = 0;
unsigned lclNum = op2->AsLclVarCommon()->GetLclNum();
// Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum).
while (true)
{
// ADD(lclNum, lclNum), end of tree
if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount += 2;
break;
}
// ADD(ADD(X, Y), lclNum), keep descending
else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount++;
op2 = op1->AsOp()->gtOp2;
op1 = op1->AsOp()->gtOp1;
}
// Any other case is a pattern we won't attempt to fold for now.
else
{
return tree;
}
}
// V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize
// accordingly
consTree->BashToConst(foldCount, tree->TypeGet());
GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree);
DEBUG_DESTROY_NODE(tree);
return morphed;
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Morph XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "allocacheck.h" // for alloca
// Convert the given node into a call to the specified helper passing
// the given argument list.
//
// Tries to fold constants and also adds an edge for the overflow exception.
// Returns the morphed tree.
GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper)
{
GenTree* result;
/* If the operand is a constant, we'll try to fold it */
if (oper->OperIsConst())
{
GenTree* oldTree = tree;
tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
if (tree != oldTree)
{
return fgMorphTree(tree);
}
else if (tree->OperIsConst())
{
return fgMorphConst(tree);
}
// assert that oper is unchanged and that it is still a GT_CAST node
noway_assert(tree->AsCast()->CastOp() == oper);
noway_assert(tree->gtOper == GT_CAST);
}
result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper));
assert(result == tree);
return result;
}
/*****************************************************************************
*
* Convert the given node into a call to the specified helper passing
* the given argument list.
*/
GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs)
{
// The helper call ought to be semantically equivalent to the original node, so preserve its VN.
tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN);
GenTreeCall* call = tree->AsCall();
call->gtCallType = CT_HELPER;
call->gtReturnType = tree->TypeGet();
call->gtCallMethHnd = eeFindHelper(helper);
call->gtCallThisArg = nullptr;
call->gtCallArgs = args;
call->gtCallLateArgs = nullptr;
call->fgArgInfo = nullptr;
call->gtRetClsHnd = nullptr;
call->gtCallMoreFlags = GTF_CALL_M_EMPTY;
call->gtInlineCandidateInfo = nullptr;
call->gtControlExpr = nullptr;
call->gtRetBufArg = nullptr;
#ifdef UNIX_X86_ABI
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
#if DEBUG
// Helper calls are never candidates.
call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER;
call->callSig = nullptr;
#endif // DEBUG
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if FEATURE_MULTIREG_RET
call->ResetReturnType();
call->ClearOtherRegs();
call->ClearOtherRegFlags();
#ifndef TARGET_64BIT
if (varTypeIsLong(tree))
{
call->InitializeLongReturnType();
}
#endif // !TARGET_64BIT
#endif // FEATURE_MULTIREG_RET
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree->gtFlags |= GTF_CALL;
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
/* Perform the morphing */
if (morphArgs)
{
tree = fgMorphArgs(call);
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphExpandCast: Performs the pre-order (required) morphing for a cast.
//
// Performs a rich variety of pre-order transformations (and some optimizations).
//
// Notably:
// 1. Splits long -> small type casts into long -> int -> small type
// for 32 bit targets. Does the same for float/double -> small type
// casts for all targets.
// 2. Morphs casts not supported by the target directly into helpers.
// These mostly have to do with casts from and to floating point
// types, especially checked ones. Refer to the implementation for
// what specific casts need to be handled - it is a complex matrix.
// 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via
// assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC
// temporary.
// 3. "Pushes down" truncating long -> int casts for some operations:
// CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)).
// The purpose of this is to allow "optNarrowTree" in the post-order
// traversal to fold the tree into a TYP_INT one, which helps 32 bit
// targets (and AMD64 too since 32 bit instructions are more compact).
// TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64.
//
// Arguments:
// tree - the cast tree to morph
//
// Return Value:
// The fully morphed tree, or "nullptr" if it needs further morphing,
// in which case the cast may be transformed into an unchecked one
// and its operand changed (the cast "expanded" into two).
//
GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree)
{
GenTree* oper = tree->CastOp();
if (fgGlobalMorph && (oper->gtOper == GT_ADDR))
{
// Make sure we've checked if 'oper' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast
// morphing code to see that type.
fgMorphImplicitByRefArgs(oper);
}
var_types srcType = genActualType(oper);
var_types dstType = tree->CastToType();
unsigned dstSize = genTypeSize(dstType);
// See if the cast has to be done in two steps. R -> I
if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType))
{
if (srcType == TYP_FLOAT
#if defined(TARGET_ARM64)
// Arm64: src = float, dst is overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& tree->gtOverflow()
#elif defined(TARGET_AMD64)
// Amd64: src = float, dst = uint64 or overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& (tree->gtOverflow() || (dstType == TYP_ULONG))
#elif defined(TARGET_ARM)
// Arm: src = float, dst = int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType))
#else
// x86: src = float, dst = uint32/int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT))
#endif
)
{
oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE);
}
// Do we need to do it in two steps R -> I -> smallType?
if (dstSize < genTypeSize(TYP_INT))
{
oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->AsCast()->CastOp() = oper;
// We must not mistreat the original cast, which was from a floating point type,
// as from an unsigned type, since we now have a TYP_INT node for the source and
// CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT).
assert(!tree->IsUnsigned());
}
else
{
if (!tree->gtOverflow())
{
#ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly.
return nullptr;
#else
switch (dstType)
{
case TYP_INT:
return nullptr;
case TYP_UINT:
#if defined(TARGET_ARM) || defined(TARGET_AMD64)
return nullptr;
#else // TARGET_X86
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper);
#endif // TARGET_X86
case TYP_LONG:
#ifdef TARGET_AMD64
// SSE2 has instructions to convert a float/double directly to a long
return nullptr;
#else // !TARGET_AMD64
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper);
#endif // !TARGET_AMD64
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper);
default:
unreached();
}
#endif // TARGET_ARM64
}
else
{
switch (dstType)
{
case TYP_INT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper);
case TYP_UINT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper);
case TYP_LONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper);
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper);
default:
unreached();
}
}
}
}
#ifndef TARGET_64BIT
// The code generation phase (for x86 & ARM32) does not handle casts
// directly from [u]long to anything other than [u]int. Insert an
// intermediate cast to native int.
else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType))
{
oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->AsCast()->CastOp() = oper;
}
#endif //! TARGET_64BIT
#ifdef TARGET_ARMARCH
// AArch, unlike x86/amd64, has instructions that can cast directly from
// all integers (except for longs on AArch32 of course) to floats.
// Because there is no IL instruction conv.r4.un, uint/ulong -> float
// casts are always imported as CAST(float <- CAST(double <- uint/ulong)).
// We can eliminate the redundant intermediate cast as an optimization.
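// For example, CAST(float <- CAST(double <- uint)) is retyped here into a direct
// CAST(float <- uint), discarding the outer cast.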
else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST)
#ifdef TARGET_ARM
&& !varTypeIsLong(oper->AsCast()->CastOp())
#endif
)
{
oper->gtType = TYP_FLOAT;
oper->CastToType() = TYP_FLOAT;
return fgMorphTree(oper);
}
#endif // TARGET_ARMARCH
#ifdef TARGET_ARM
// converts long/ulong --> float/double casts into helper calls.
else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType))
{
if (dstType == TYP_FLOAT)
{
// there is only a double helper, so we
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
if (tree->gtFlags & GTF_UNSIGNED)
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
#endif // TARGET_ARM
#ifdef TARGET_AMD64
// Do we have to do two step U4/8 -> R4/8 ?
// Codegen supports the following conversion as one-step operation
// a) Long -> R4/R8
// b) U8 -> R8
//
// The following conversions are performed as two-step operations using above.
// U4 -> R4/8 = U4-> Long -> R4/8
// U8 -> R4 = U8 -> R8 -> R4
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
if (dstType == TYP_FLOAT)
{
// Codegen can handle U8 -> R8 conversion.
// U8 -> R4 = U8 -> R8 -> R4
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->CastOp() = oper;
}
}
#endif // TARGET_AMD64
#ifdef TARGET_X86
// Do we have to do two step U4/8 -> R4/8 ?
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
}
else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType))
{
oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
// Since we don't have a Jit Helper that converts to a TYP_FLOAT
// we just use the one that converts to a TYP_DOUBLE
// and then add a cast to TYP_FLOAT
//
if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL))
{
// Fix the return type to be TYP_DOUBLE
//
oper->gtType = TYP_DOUBLE;
// Add a Cast to TYP_FLOAT
//
tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
else
{
return oper;
}
}
#endif // TARGET_X86
else if (varTypeIsGC(srcType) != varTypeIsGC(dstType))
{
// We are casting away GC information. We would like to just
// change the type to int; however, this gives the emitter fits because
// it believes the variable is a GC variable at the beginning of the
// instruction group, but it is not turned non-GC by the code generator.
// We fix this by copying the GC pointer to a non-GC pointer temp.
noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?");
// We generate an assignment to an int and then do the cast from an int. With this we avoid
// the gc problem and we allow casts to bytes, longs, etc...
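// Roughly: CAST(dstType <- gcOper) becomes COMMA(ASG(tmp, gcOper), CAST(dstType <- tmp)),
// where 'tmp' is a fresh TYP_I_IMPL temp.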
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC"));
oper->gtType = TYP_I_IMPL;
GenTree* asg = gtNewTempAssign(lclNum, oper);
oper->gtType = srcType;
// do the real cast
GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType);
// Generate the comma tree
oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast);
return fgMorphTree(oper);
}
// Look for narrowing casts ([u]long -> [u]int) and try to push them
// down into the operand before morphing it.
//
// It doesn't matter if this cast is from ulong or long (i.e. if
// GTF_UNSIGNED is set) because the transformation is only applied to
// overflow-insensitive narrowing casts, which always silently truncate.
//
// Note that casts from [u]long to small integer types are handled above.
if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT)))
{
// As a special case, look for overflow-sensitive casts of an AND
// expression, and see if the second operand is a small constant. Since
// the result of an AND is bound by its smaller operand, it may be
// possible to prove that the cast won't overflow, which will in turn
// allow the cast's operand to be transformed.
if (tree->gtOverflow() && (oper->OperGet() == GT_AND))
{
GenTree* andOp2 = oper->AsOp()->gtOp2;
// Look for a constant less than 2^{32} for a cast to uint, or less
// than 2^{31} for a cast to int.
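// For example, CAST_OVF(uint <- AND(x, 0xFFFFFF)) can never overflow since the
// AND result is bounded by the constant, so the overflow check is removed.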
int maxWidth = (dstType == TYP_UINT) ? 32 : 31;
if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0))
{
tree->ClearOverflow();
tree->SetAllEffectsFlags(oper);
}
}
// Only apply this transformation during global morph,
// when neither the cast node nor the oper node may throw an exception
// based on the upper 32 bits.
//
if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx())
{
// For these operations the lower 32 bits of the result only depends
// upon the lower 32 bits of the operands.
//
bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG);
// For long LSH cast to int, there is a discontinuity in behavior
// when the shift amount is 32 or larger.
//
// CAST(INT, LSH(1LL, 31)) == LSH(1, 31)
// LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31)
//
// CAST(INT, LSH(1LL, 32)) == 0
// LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1
//
// So some extra validation is needed.
//
if (oper->OperIs(GT_LSH))
{
GenTree* shiftAmount = oper->AsOp()->gtOp2;
// Expose constant value for shift, if possible, to maximize the number
// of cases we can handle.
shiftAmount = gtFoldExpr(shiftAmount);
oper->AsOp()->gtOp2 = shiftAmount;
#if DEBUG
// We may remorph the shift amount tree again later, so clear any morphed flag.
shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
if (shiftAmount->IsIntegralConst())
{
const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue();
if ((shiftAmountValue >= 64) || (shiftAmountValue < 0))
{
// Shift amount is large enough or negative so result is undefined.
// Don't try to optimize.
assert(!canPushCast);
}
else if (shiftAmountValue >= 32)
{
// We know that we have a narrowing cast ([u]long -> [u]int)
// and that we are casting to a 32-bit value, which will result in zero.
//
// Check to see if we have any side-effects that we must keep
//
if ((tree->gtFlags & GTF_ALL_EFFECT) == 0)
{
// Result of the shift is zero.
DEBUG_DESTROY_NODE(tree);
GenTree* zero = gtNewZeroConNode(TYP_INT);
return fgMorphTree(zero);
}
else // We do have a side-effect
{
// We could create a GT_COMMA node here to keep the side-effect and return a zero
// Instead we just don't try to optimize this case.
canPushCast = false;
}
}
else
{
// Shift amount is positive and small enough that we can push the cast through.
canPushCast = true;
}
}
else
{
// Shift amount is unknown. We can't optimize this case.
assert(!canPushCast);
}
}
if (canPushCast)
{
DEBUG_DESTROY_NODE(tree);
// Insert narrowing casts for op1 and op2.
oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType);
if (oper->AsOp()->gtOp2 != nullptr)
{
oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType);
}
// Clear the GT_MUL_64RSLT if it is set.
if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT))
{
oper->gtFlags &= ~GTF_MUL_64RSLT;
}
// The operation now produces a 32-bit result.
oper->gtType = TYP_INT;
// Remorph the new tree as the casts that we added may be folded away.
return fgMorphTree(oper);
}
}
}
return nullptr;
}
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind)
{
switch (kind)
{
case NonStandardArgKind::None:
return "None";
case NonStandardArgKind::PInvokeFrame:
return "PInvokeFrame";
case NonStandardArgKind::PInvokeTarget:
return "PInvokeTarget";
case NonStandardArgKind::PInvokeCookie:
return "PInvokeCookie";
case NonStandardArgKind::WrapperDelegateCell:
return "WrapperDelegateCell";
case NonStandardArgKind::ShiftLow:
return "ShiftLow";
case NonStandardArgKind::ShiftHigh:
return "ShiftHigh";
case NonStandardArgKind::FixedRetBuffer:
return "FixedRetBuffer";
case NonStandardArgKind::VirtualStubCell:
return "VirtualStubCell";
case NonStandardArgKind::R2RIndirectionCell:
return "R2RIndirectionCell";
case NonStandardArgKind::ValidateIndirectCallTarget:
return "ValidateIndirectCallTarget";
default:
unreached();
}
}
void fgArgTabEntry::Dump() const
{
printf("fgArgTabEntry[arg %u", argNum);
printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet()));
printf(" %s", varTypeName(argType));
printf(" (%s)", passedByRef ? "By ref" : "By value");
if (GetRegNum() != REG_STK)
{
printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s");
for (unsigned i = 0; i < numRegs; i++)
{
printf(" %s", getRegName(regNums[i]));
}
}
if (GetStackByteSize() > 0)
{
#if defined(DEBUG_ARG_SLOTS)
printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset);
#else
printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset);
#endif
}
printf(", byteAlignment=%u", m_byteAlignment);
if (isLateArg())
{
printf(", lateArgInx=%u", GetLateArgInx());
}
if (IsSplit())
{
printf(", isSplit");
}
if (needTmp)
{
printf(", tmpNum=V%02u", tmpNum);
}
if (needPlace)
{
printf(", needPlace");
}
if (isTmp)
{
printf(", isTmp");
}
if (processed)
{
printf(", processed");
}
if (IsHfaRegArg())
{
printf(", isHfa(%s)", varTypeName(GetHfaType()));
}
if (isBackFilled)
{
printf(", isBackFilled");
}
if (nonStandardArgKind != NonStandardArgKind::None)
{
printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind));
}
if (isStruct)
{
printf(", isStruct");
}
printf("]\n");
}
#endif
fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs)
{
compiler = comp;
callTree = call;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = 0;
#if defined(UNIX_X86_ABI)
alignmentDone = false;
stkSizeBytes = 0;
padStkAlign = 0;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = 0;
#endif
argTableSize = numArgs; // the allocated table size
hasRegArgs = false;
hasStackArgs = false;
argsComplete = false;
argsSorted = false;
needsTemps = false;
if (argTableSize == 0)
{
argTable = nullptr;
}
else
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
}
}
/*****************************************************************************
*
* fgArgInfo Copy Constructor
*
* This method needs to act like a copy constructor for fgArgInfo.
* The newCall needs to have its fgArgInfo initialized such that
* we have a newCall that is an exact copy of the oldCall.
* We have to take care since the argument information
* in the argTable contains pointers that must point to the
* new arguments and not the old arguments.
*/
fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall)
{
fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo;
compiler = oldArgInfo->compiler;
callTree = newCall;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = oldArgInfo->stkLevel;
#if defined(UNIX_X86_ABI)
alignmentDone = oldArgInfo->alignmentDone;
stkSizeBytes = oldArgInfo->stkSizeBytes;
padStkAlign = oldArgInfo->padStkAlign;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = oldArgInfo->outArgSize;
#endif
argTableSize = oldArgInfo->argTableSize;
argsComplete = false;
argTable = nullptr;
assert(oldArgInfo->argsComplete);
if (argTableSize > 0)
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
// Copy the old arg entries
for (unsigned i = 0; i < argTableSize; i++)
{
argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]);
}
// The copied arg entries contain pointers to old uses, they need
// to be updated to point to new uses.
if (newCall->gtCallThisArg != nullptr)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldCall->gtCallThisArg)
{
argTable[i]->use = newCall->gtCallThisArg;
break;
}
}
}
GenTreeCall::UseIterator newUse = newCall->Args().begin();
GenTreeCall::UseIterator newUseEnd = newCall->Args().end();
GenTreeCall::UseIterator oldUse = oldCall->Args().begin();
GenTreeCall::UseIterator oldUseEnd = newCall->Args().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldUse.GetUse())
{
argTable[i]->use = newUse.GetUse();
break;
}
}
}
newUse = newCall->LateArgs().begin();
newUseEnd = newCall->LateArgs().end();
oldUse = oldCall->LateArgs().begin();
oldUseEnd = newCall->LateArgs().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->lateUse == oldUse.GetUse())
{
argTable[i]->lateUse = newUse.GetUse();
break;
}
}
}
}
argCount = oldArgInfo->argCount;
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;)
nextStackByteOffset = oldArgInfo->nextStackByteOffset;
hasRegArgs = oldArgInfo->hasRegArgs;
hasStackArgs = oldArgInfo->hasStackArgs;
argsComplete = true;
argsSorted = true;
}
void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry)
{
assert(argCount < argTableSize);
argTable[argCount] = curArgTabEntry;
argCount++;
}
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
// Any additional register numbers are set by the caller.
// This is primarily because on ARM we don't yet know if it
// will be split or if it is a double HFA, so the number of registers
// may actually be less.
curArgTabEntry->setRegNum(0, regNum);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
curArgTabEntry->numRegs = numRegs;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->slotNum = 0;
curArgTabEntry->numSlots = 0;
#endif
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(0);
hasRegArgs = true;
if (argCount >= argTableSize)
{
fgArgTabEntry** oldTable = argTable;
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1];
memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*));
argTableSize++;
}
AddArg(curArgTabEntry);
return curArgTabEntry;
}
#if defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr)
{
fgArgTabEntry* curArgTabEntry =
AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg);
assert(curArgTabEntry != nullptr);
curArgTabEntry->isStruct = isStruct; // is this a struct arg
curArgTabEntry->structIntRegs = structIntRegs;
curArgTabEntry->structFloatRegs = structFloatRegs;
INDEBUG(curArgTabEntry->checkIsStruct();)
assert(numRegs <= 2);
if (numRegs == 2)
{
curArgTabEntry->setRegNum(1, otherRegNum);
}
if (isStruct && structDescPtr != nullptr)
{
curArgTabEntry->structDesc.CopyFrom(*structDescPtr);
}
return curArgTabEntry;
}
#endif // defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE);
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment);
DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum);
curArgTabEntry->setRegNum(0, REG_STK);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->numSlots = numSlots;
curArgTabEntry->slotNum = nextSlotNum;
#endif
curArgTabEntry->numRegs = 0;
#if defined(UNIX_AMD64_ABI)
curArgTabEntry->structIntRegs = 0;
curArgTabEntry->structFloatRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(nextStackByteOffset);
hasStackArgs = true;
AddArg(curArgTabEntry);
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
nextStackByteOffset += curArgTabEntry->GetByteSize();
return curArgTabEntry;
}
void fgArgInfo::RemorphReset()
{
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// UpdateRegArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be at least partially passed in registers.
//
void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
assert(curArgTabEntry->numRegs != 0);
assert(curArgTabEntry->use->GetNode() == node);
}
//------------------------------------------------------------------------
// UpdateStkArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be passed on the stack.
//
void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
noway_assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit());
assert(curArgTabEntry->use->GetNode() == node);
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE);
assert(curArgTabEntry->slotNum == nextSlotNum);
nextSlotNum += curArgTabEntry->numSlots;
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment());
assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset);
nextStackByteOffset += curArgTabEntry->GetStackByteSize();
}
void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots)
{
fgArgTabEntry* curArgTabEntry = nullptr;
assert(argNum < argCount);
for (unsigned inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
{
break;
}
}
assert(numRegs > 0);
assert(numSlots > 0);
if (argsComplete)
{
assert(curArgTabEntry->IsSplit() == true);
assert(curArgTabEntry->numRegs == numRegs);
DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);)
assert(hasStackArgs == true);
}
else
{
curArgTabEntry->SetSplit(true);
curArgTabEntry->numRegs = numRegs;
DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;)
curArgTabEntry->SetByteOffset(0);
hasStackArgs = true;
}
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
// TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size.
nextStackByteOffset += numSlots * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// EvalToTmp: Replace the node in the given fgArgTabEntry with a temp
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry for the argument
// tmpNum - the varNum for the temp
// newNode - the assignment of the argument value to the temp
//
// Notes:
// Although the name of this method is EvalToTmp, it doesn't actually create
// the temp or the copy.
//
void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode)
{
assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert(curArgTabEntry->use->GetNode() == newNode);
assert(curArgTabEntry->GetNode() == newNode);
curArgTabEntry->tmpNum = tmpNum;
curArgTabEntry->isTmp = true;
}
void fgArgInfo::ArgsComplete()
{
bool hasStructRegArg = false;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
if (curArgTabEntry->GetRegNum() == REG_STK)
{
assert(hasStackArgs == true);
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we use push instructions to pass arguments:
// The non-register arguments are evaluated and pushed in order
// and they are never evaluated into temps
//
continue;
#endif
}
#if FEATURE_ARG_SPLIT
else if (curArgTabEntry->IsSplit())
{
hasStructRegArg = true;
assert(hasStackArgs == true);
}
#endif // FEATURE_ARG_SPLIT
else // we have a register argument, next we look for a struct type.
{
if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
}
/* If the argument tree contains an assignment (GTF_ASG) then the argument
and every earlier argument (except constants) must be evaluated into temps
since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a"
-> when we see the second arg "a=5"
we know the first two arguments "a, a=5" have to be evaluated into temps
For the case of an assignment, we only know that there exists some assignment someplace
in the tree. We don't know what is being assigned so we are very conservative here
and assume that any local variable could have been assigned.
*/
if (argx->gtFlags & GTF_ASG)
{
// If this is not the only argument, or it's a copyblk, or it already evaluates the expression to
// a tmp, then we need a temp in the late arg list.
if ((argCount > 1) || argx->OperIsCopyBlkOp()
#ifdef FEATURE_FIXED_OUT_ARGS
|| curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property
// that we only have late non-register args when that feature is on.
#endif // FEATURE_FIXED_OUT_ARGS
)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// For all previous arguments, unless they are a simple constant
// we require that they be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
if (!prevArgTabEntry->GetNode()->IsInvariant())
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0);
#if FEATURE_FIXED_OUT_ARGS
// Like calls, if this argument has a tree that will do an inline throw,
// i.e. a call to a jit helper, then we need to treat it like a call (but only
// if there are/were any stack args).
// This means unnesting, sorting, etc. Technically this is overly
// conservative, but I want to avoid as much special-case debug-only code
// as possible, so leveraging the GTF_CALL flag is the easiest.
//
if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode &&
(compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT))
{
for (unsigned otherInx = 0; otherInx < argCount; otherInx++)
{
if (otherInx == curInx)
{
continue;
}
if (argTable[otherInx]->GetRegNum() == REG_STK)
{
treatLikeCall = true;
break;
}
}
}
#endif // FEATURE_FIXED_OUT_ARGS
/* If it contains a call (GTF_CALL) then itself and everything before the call
with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT
has to be kept in the right order since we will move the call to the first position)
For calls we don't have to be quite as conservative as we are with an assignment
since the call won't be modifying any non-address taken LclVars.
*/
if (treatLikeCall)
{
if (argCount > 1) // If this is not the only argument
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL))
{
// Spill all arguments that are floating point calls
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// All previous arguments may need to be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
// For all previous arguments, if they have any GTF_ALL_EFFECT
// we require that they be evaluated into a temp
if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0)
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
#if FEATURE_FIXED_OUT_ARGS
// Or, if they are stored into the FIXED_OUT_ARG area
// we require that they be moved to the gtCallLateArgs
// and replaced with a placeholder node
else if (prevArgTabEntry->GetRegNum() == REG_STK)
{
prevArgTabEntry->needPlace = true;
}
#if FEATURE_ARG_SPLIT
else if (prevArgTabEntry->IsSplit())
{
prevArgTabEntry->needPlace = true;
}
#endif // FEATURE_ARG_SPLIT
#endif
}
}
#if FEATURE_MULTIREG_ARGS
// For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST
// with multiple indirections, so here we consider spilling it into a tmp LclVar.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
bool isMultiRegArg =
(curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1);
#else
bool isMultiRegArg = (curArgTabEntry->numRegs > 1);
#endif
if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false))
{
if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0))
{
// Spill multireg struct arguments that have Assignments or Calls embedded in them
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else
{
// We call gtPrepareCost to measure the cost of evaluating this tree
compiler->gtPrepareCost(argx);
if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX)))
{
// Spill multireg struct arguments that are expensive to evaluate twice
curArgTabEntry->needTmp = true;
needsTemps = true;
}
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet()))
{
// SIMD types do not need the optimization below due to their sizes
if (argx->OperIsSimdOrHWintrinsic() ||
(argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) &&
argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic()))
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
#endif
#ifndef TARGET_ARM
// TODO-Arm: This optimization is not implemented for ARM32
// so we skip this for ARM32 until it is ported to use RyuJIT backend
//
else if (argx->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argx->AsObj();
unsigned structSize = argObj->GetLayout()->GetSize();
switch (structSize)
{
case 3:
case 5:
case 6:
case 7:
// If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
//
if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar?
{
// If we don't have a LclVar we need to read exactly 3, 5, 6 or 7 bytes.
// For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
}
break;
case 11:
case 13:
case 14:
case 15:
// Spill any GT_OBJ multireg structs that are difficult to extract
//
// When we have a GT_OBJ of a struct with the above sizes we would need
// to use 3 or 4 load instructions to load the exact size of this struct.
// Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
// will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
// Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
// the argument.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
break;
default:
break;
}
}
#endif // !TARGET_ARM
}
}
#endif // FEATURE_MULTIREG_ARGS
}
// We only care because we can't spill structs and qmarks involve a lot of spilling, but
// if we don't have qmarks, then it doesn't matter.
// So check for Qmark's globally once here, instead of inside the loop.
//
const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed);
#if FEATURE_FIXED_OUT_ARGS
// For Arm/x64 we only care because we can't reorder a register
// argument that uses GT_LCLHEAP. This is an optimization to
// save a check inside the below loop.
//
const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed);
#else
const bool hasStackArgsWeCareAbout = hasStackArgs;
#endif // FEATURE_FIXED_OUT_ARGS
// If we have any stack args we have to force the evaluation
// of any arguments passed in registers that might throw an exception
//
// Technically we are only required to handle the following two cases:
// a GT_IND with GTF_IND_RNGCHK (only on x86) or
// a GT_LCLHEAP node that allocates stuff on the stack
//
if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout)
{
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
// Examine the register args that are currently not marked needTmp
//
if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK))
{
if (hasStackArgsWeCareAbout)
{
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we previously recorded a stack depth of zero when
// morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag
// Thus we can not reorder the argument after any stack based argument
// (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to
// check for it explicitly.)
//
if (argx->gtFlags & GTF_EXCEPT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
#else
// For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP
//
if (argx->gtFlags & GTF_EXCEPT)
{
assert(compiler->compLocallocUsed);
// Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
#endif
}
if (hasStructRegArgWeCareAbout)
{
// Returns true if a GT_QMARK node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
}
}
}
// When CFG is enabled and this is a delegate call or vtable call we must
// compute the call target before all late args. However this will
// effectively null-check 'this', which should happen only after all
// arguments are evaluated. Thus we must evaluate all args with side
// effects to a temp.
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke()))
{
// Always evaluate 'this' to temp.
argTable[0]->needTmp = true;
needsTemps = true;
for (unsigned curInx = 1; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
GenTree* arg = curArgTabEntry->GetNode();
if ((arg->gtFlags & GTF_ALL_EFFECT) != 0)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
argsComplete = true;
}
void fgArgInfo::SortArgs()
{
assert(argsComplete == true);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nSorting the arguments:\n");
}
#endif
/* Shuffle the arguments around before we build the gtCallLateArgs list.
The idea is to move all "simple" arguments like constants and local vars
to the end of the table, and move the complex arguments towards the beginning
of the table. This will help prevent registers from being spilled by
allowing us to evaluate the more complex arguments before the simpler arguments.
The argTable ends up looking like:
+------------------------------------+ <--- argTable[argCount - 1]
| constants |
+------------------------------------+
| local var / local field |
+------------------------------------+
| remaining arguments sorted by cost |
+------------------------------------+
| temps (argTable[].needTmp = true) |
+------------------------------------+
| args with calls (GTF_CALL) |
+------------------------------------+ <--- argTable[0]
*/
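// For example (illustrative): for a call such as foo(5, x, bar(), a[i] + j), the constant 5 and
// the local var x end up at the end of the table, the nested call bar() is placed at the
// beginning, and the remaining argument a[i] + j lands in between, ordered by cost.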
/* Set the beginning and end for the new argument table */
unsigned curInx;
int regCount = 0;
unsigned begTab = 0;
unsigned endTab = argCount - 1;
unsigned argsRemaining = argCount;
// First take care of arguments that are constants.
// [We use a backward iterator pattern]
//
curInx = argCount;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
regCount++;
}
assert(curArgTabEntry->lateUse == nullptr);
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put constants at the end of the table
//
if (argx->gtOper == GT_CNS_INT)
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > 0);
if (argsRemaining > 0)
{
// Next take care of arguments that are calls.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put calls at the beginning of the table
//
if (argx->gtFlags & GTF_CALL)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of arguments that are temps.
// These temps come before the arguments that are
// ordinary local vars or local fields
// since this will give them a better chance to become
// enregistered into their actual argument register.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
if (curArgTabEntry->needTmp)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of local var and local field arguments.
// These are moved towards the end of the argument evaluation.
// [We use a backward iterator pattern]
//
curInx = endTab + 1;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD))
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > begTab);
}
// Finally, take care of all the remaining arguments.
// Note that we fill in one arg at a time using a while loop.
bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop
while (argsRemaining > 0)
{
/* Find the most expensive arg remaining and evaluate it next */
fgArgTabEntry* expensiveArgTabEntry = nullptr;
unsigned expensiveArg = UINT_MAX;
unsigned expensiveArgCost = 0;
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// We should have already handled these kinds of args
assert(argx->gtOper != GT_LCL_VAR);
assert(argx->gtOper != GT_LCL_FLD);
assert(argx->gtOper != GT_CNS_INT);
// This arg should either have no persistent side effects or be the last one in our table
// assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1)));
if (argsRemaining == 1)
{
// This is the last arg to place
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
assert(begTab == endTab);
break;
}
else
{
if (!costsPrepared)
{
/* We call gtPrepareCost to measure the cost of evaluating this tree */
compiler->gtPrepareCost(argx);
}
if (argx->GetCostEx() > expensiveArgCost)
{
// Remember this arg as the most expensive one that we have yet seen
expensiveArgCost = argx->GetCostEx();
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
}
}
}
}
noway_assert(expensiveArg != UINT_MAX);
// put the most expensive arg towards the beginning of the table
expensiveArgTabEntry->processed = true;
// place expensiveArgTabEntry at the begTab position by performing a swap
//
if (expensiveArg != begTab)
{
argTable[expensiveArg] = argTable[begTab];
argTable[begTab] = expensiveArgTabEntry;
}
begTab++;
argsRemaining--;
costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop
}
// The table should now be completely filled and thus begTab should now be adjacent to endTab
// and argsRemaining should be zero
assert(begTab == (endTab + 1));
assert(argsRemaining == 0);
argsSorted = true;
}
#ifdef DEBUG
void fgArgInfo::Dump(Compiler* compiler) const
{
for (unsigned curInx = 0; curInx < ArgCount(); curInx++)
{
fgArgTabEntry* curArgEntry = ArgTable()[curInx];
curArgEntry->Dump();
}
}
#endif
//------------------------------------------------------------------------------
// fgMakeTmpArgNode : This function creates a tmp var only if needed.
// We need this to be done in order to enforce ordering
// of the evaluation of arguments.
//
// Arguments:
// curArgTabEntry - the argument table entry for the argument
//
// Return Value:
// the newly created temp var tree.
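//
// Notes:
//    For a struct temp this may produce (illustrative; the exact shape depends on the target ABI)
//    either a GT_LCL_FLD, when the struct is passed as a primitive type, or an
//    OBJ(ADDR(LCL_VAR)) when it is passed by value in multiple registers or on the stack.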
GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry)
{
unsigned tmpVarNum = curArgTabEntry->tmpNum;
LclVarDsc* varDsc = lvaGetDesc(tmpVarNum);
assert(varDsc->lvIsTemp);
var_types type = varDsc->TypeGet();
// Create a copy of the temp to go into the late argument list
GenTree* arg = gtNewLclvNode(tmpVarNum, type);
GenTree* addrNode = nullptr;
if (varTypeIsStruct(type))
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM)
// Can this type be passed as a primitive type?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type.
bool passedAsPrimitive = false;
if (curArgTabEntry->TryPassAsPrimitive())
{
CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd();
var_types structBaseType =
getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg());
if (structBaseType != TYP_UNKNOWN)
{
passedAsPrimitive = true;
#if defined(UNIX_AMD64_ABI)
// TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry,
// and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take
// a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again.
//
if (genIsValidFloatReg(curArgTabEntry->GetRegNum()))
{
if (structBaseType == TYP_INT)
{
structBaseType = TYP_FLOAT;
}
else
{
assert(structBaseType == TYP_LONG);
structBaseType = TYP_DOUBLE;
}
}
#endif
type = structBaseType;
}
}
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
if (passedAsPrimitive)
{
arg->ChangeOper(GT_LCL_FLD);
arg->gtType = type;
lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
}
else
{
var_types addrType = TYP_BYREF;
arg = gtNewOperNode(GT_ADDR, addrType, arg);
lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS));
addrNode = arg;
#if FEATURE_MULTIREG_ARGS
#ifdef TARGET_ARM64
assert(varTypeIsStruct(type));
if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg()))
{
// We will create a GT_OBJ for the argument below.
// This will be passed by value in two registers.
assert(addrNode != nullptr);
// Create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
}
#else
// Always create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
#endif // !TARGET_ARM64
#endif // FEATURE_MULTIREG_ARGS
}
#else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM)
// On other targets, we pass the struct by value
assert(varTypeIsStruct(type));
addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
// Get a new Obj node temp to use it as a call argument.
// gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode);
#endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM)
} // (varTypeIsStruct(type))
if (addrNode != nullptr)
{
assert(addrNode->gtOper == GT_ADDR);
// the child of a GT_ADDR is required to have this flag set
addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE;
}
return arg;
}
//------------------------------------------------------------------------------
// EvalArgsToTemps : Create temp assignments and populate the LateArgs list.
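//
// Notes:
//    Sketch of the transformation (illustrative): an argument that needs a temp is evaluated in
//    the early list via an assignment ASG(tmpN, arg) marked GTF_LATE_ARG, and a use of tmpN is
//    appended to gtCallLateArgs; a register argument that needs no temp is replaced in the early
//    list by a GT_ARGPLACE placeholder and the argument itself is moved to gtCallLateArgs.
//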
void fgArgInfo::EvalArgsToTemps()
{
assert(argsSorted);
unsigned regArgInx = 0;
// Now go through the argument table and perform the necessary evaluation into temps
GenTreeCall::Use* tmpRegArgNext = nullptr;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry->lateUse == nullptr);
GenTree* argx = curArgTabEntry->GetNode();
GenTree* setupArg = nullptr;
GenTree* defArg;
#if !FEATURE_FIXED_OUT_ARGS
// Only ever set for FEATURE_FIXED_OUT_ARGS
assert(curArgTabEntry->needPlace == false);
// On x86 and other archs that use push instructions to pass arguments:
// Only the register arguments need to be replaced with placeholder nodes.
// Stacked arguments are evaluated and pushed (or stored into the stack) in order.
//
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
#endif
if (curArgTabEntry->needTmp)
{
if (curArgTabEntry->isTmp)
{
// Create a copy of the temp to go into the late argument list
defArg = compiler->fgMakeTmpArgNode(curArgTabEntry);
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
}
else
{
// Create a temp assignment for the argument
// Put the temp in the gtCallLateArgs list
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Argument with 'side effect'...\n");
compiler->gtDispTree(argx);
}
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
noway_assert(argx->gtType != TYP_STRUCT);
#endif
unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect"));
if (argx->gtOper == GT_MKREFANY)
{
// For GT_MKREFANY, typically the actual struct copying does
// not have any side-effects and can be delayed. So instead
// of using a temp for the whole struct, we can just use a temp
// for the operand that has a side-effect
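// (Illustrative: for a mkrefany whose second operand has no side effects, only the first
// operand is spilled to the temp, and the mkrefany itself, now side-effect free, is kept
// for the late argument list.)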
GenTree* operand;
if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp1;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp2;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
}
if (setupArg != nullptr)
{
// Now keep the mkrefany for the late argument list
defArg = argx;
// Clear the side-effect flags because now both op1 and op2 have no side-effects
defArg->gtFlags &= ~GTF_ALL_EFFECT;
}
else
{
setupArg = compiler->gtNewTempAssign(tmpVarNum, argx);
LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum);
var_types lclVarType = genActualType(argx->gtType);
var_types scalarType = TYP_UNKNOWN;
if (setupArg->OperIsCopyBlkOp())
{
setupArg = compiler->fgMorphCopyBlock(setupArg);
#if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI)
if (lclVarType == TYP_STRUCT)
{
// This scalar LclVar widening step is only performed for ARM and Unix AMD64 architectures.
//
CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum);
unsigned structSize = varDsc->lvExactSize;
scalarType =
compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg());
}
#endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI)
}
// scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 =>
// 8)
if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType))
{
// Create a GT_LCL_FLD using the wider type to go to the late argument list
defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0);
}
else
{
// Create a copy of the temp to go to the late argument list
defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType);
}
curArgTabEntry->isTmp = true;
curArgTabEntry->tmpNum = tmpVarNum;
#ifdef TARGET_ARM
// Previously we might have thought the local was promoted, and thus the 'COPYBLK'
// might have left holes in the used registers (see
// fgAddSkippedRegsInPromotedStructArg).
// Too bad we're not that smart for these intermediate temps...
if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1))
{
regNumber argReg = curArgTabEntry->GetRegNum();
regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum());
for (unsigned i = 1; i < curArgTabEntry->numRegs; i++)
{
argReg = genRegArgNext(argReg);
allUsedRegs |= genRegMask(argReg);
}
}
#endif // TARGET_ARM
}
/* mark the assignment as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n Evaluate to a temp:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
}
else // curArgTabEntry->needTmp == false
{
// On x86 -
// Only register args are replaced with placeholder nodes
// and the stack based arguments are evaluated and pushed in order.
//
// On Arm/x64 - When needTmp is false and needPlace is false,
// the non-register arguments are evaluated and stored in order.
// When needPlace is true we have a nested call that comes after
// this argument so we have to replace it in the gtCallArgs list
// (the initial argument evaluation list) with a placeholder.
//
if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false))
{
continue;
}
/* No temp needed - move the whole node to the gtCallLateArgs list */
/* The argument is deferred and put in the late argument list */
defArg = argx;
// Create a placeholder node to put in its place in gtCallLateArgs.
// For a struct type we also need to record the class handle of the arg.
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
// All structs are either passed (and retyped) as integral types, OR they
// are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT);
#else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)
if (defArg->TypeGet() == TYP_STRUCT)
{
clsHnd = compiler->gtGetStructHandleIfPresent(defArg);
noway_assert(clsHnd != NO_CLASS_HANDLE);
}
#endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI))
setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd);
/* mark the placeholder node as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
if (curArgTabEntry->GetRegNum() == REG_STK)
{
printf("Deferred stack argument :\n");
}
else
{
printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum()));
}
compiler->gtDispTree(argx);
printf("Replaced with placeholder node:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
if (setupArg != nullptr)
{
noway_assert(curArgTabEntry->use->GetNode() == argx);
curArgTabEntry->use->SetNode(setupArg);
}
/* deferred arg goes into the late argument list */
if (tmpRegArgNext == nullptr)
{
tmpRegArgNext = compiler->gtNewCallArgs(defArg);
callTree->AsCall()->gtCallLateArgs = tmpRegArgNext;
}
else
{
noway_assert(tmpRegArgNext->GetNode() != nullptr);
tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg));
tmpRegArgNext = tmpRegArgNext->GetNext();
}
curArgTabEntry->lateUse = tmpRegArgNext;
curArgTabEntry->SetLateArgInx(regArgInx++);
if ((setupArg != nullptr) && setupArg->OperIs(GT_ARGPLACE) && (callTree->gtRetBufArg == curArgTabEntry->use))
{
callTree->SetLclRetBufArg(tmpRegArgNext);
}
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nShuffled argument table: ");
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
printf("%s ", getRegName(curArgTabEntry->GetRegNum()));
}
}
printf("\n");
}
#endif
}
//------------------------------------------------------------------------------
// fgMakeMultiUse : If the node is an unaliased local or constant clone it,
// otherwise insert a comma form temp
//
// Arguments:
// pOp - a pointer to the child node we will be replacing with the comma expression that
// evaluates *pOp to a temp and returns the result
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
// Notes:
// Caller must ensure that if the node is an unaliased local, the second use this
// creates will be evaluated before the local can be reassigned.
//
// Can be safely called in morph preorder, before GTF_GLOB_REF is reliable.
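//
// For example (illustrative): an unaliased local V03 is simply cloned, while a tree such as
// 'x + y' is spilled via fgInsertCommaFormTemp, so *pOp becomes COMMA(ASG(tmpN, x + y), tmpN)
// and a fresh use of tmpN is returned.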
//
GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
{
GenTree* const tree = *pOp;
if (tree->IsInvariant())
{
return gtClone(tree);
}
else if (tree->IsLocal())
{
// Can't rely on GTF_GLOB_REF here.
//
if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed())
{
return gtClone(tree);
}
}
return fgInsertCommaFormTemp(pOp);
}
//------------------------------------------------------------------------------
// fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree,
// and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl)
//
// Arguments:
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
// structType - value type handle if the temp created is of TYP_STRUCT.
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
{
GenTree* subTree = *ppTree;
unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable"));
if (varTypeIsStruct(subTree))
{
assert(structType != nullptr);
lvaSetStruct(lclNum, structType, false);
}
// If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree.
// The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for
// setting type of lcl vars created.
GenTree* asg = gtNewTempAssign(lclNum, subTree);
GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load);
*ppTree = comma;
return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
}
//------------------------------------------------------------------------
// fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg
//
// Arguments:
// callNode - the call for which we are generating the fgArgInfo
//
// Return Value:
// None
//
// Notes:
// This method is idempotent in that it checks whether the fgArgInfo has already been
// constructed, and just returns.
// This method only computes the arg table and arg entries for the call (the fgArgInfo),
// and makes no modification of the args themselves.
//
// The IR for the call args can change for calls with non-standard arguments: some non-standard
// arguments add new call argument IR nodes.
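// For example (illustrative): for an indirect call that carries a PInvoke cookie (gtCallCookie),
// the cookie and a copy of the call target are prepended to gtCallArgs and recorded as
// non-standard args.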
//
void Compiler::fgInitArgInfo(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
unsigned argIndex = 0;
unsigned intArgRegNum = 0;
unsigned fltArgRegNum = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool callHasRetBuffArg = call->HasRetBufArg();
bool callIsVararg = call->IsVarargs();
#ifdef TARGET_ARM
regMaskTP argSkippedRegMask = RBM_NONE;
regMaskTP fltArgSkippedRegMask = RBM_NONE;
#endif // TARGET_ARM
#if defined(TARGET_X86)
unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated
#else
const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number
#endif
if (call->fgArgInfo != nullptr)
{
// We've already initialized and set the fgArgInfo.
return;
}
JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
// At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined.
assert(call->gtCallLateArgs == nullptr);
if (TargetOS::IsUnix && callIsVararg)
{
// Currently native varargs is not implemented on non-Windows targets.
//
// Note that some targets like Arm64 Unix should not need much work as
// the ABI is the same. While other targets may only need small changes
// such as amd64 Unix, which just expects RAX to pass numFPArguments.
NYI("Morphing Vararg call not yet implemented on non Windows targets.");
}
// Data structure for keeping track of non-standard args. Non-standard args are those that are not passed
// following the normal calling convention or in the normal argument registers. We either mark existing
// arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the
// non-standard arguments into the argument list, below.
class NonStandardArgs
{
struct NonStandardArg
{
GenTree* node; // The tree node representing this non-standard argument.
// Note that this must be updated if the tree node changes due to morphing!
regNumber reg; // The register to be assigned to this non-standard argument.
NonStandardArgKind kind; // The kind of the non-standard arg
};
ArrayStack<NonStandardArg> args;
public:
NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments
{
}
//-----------------------------------------------------------------------------
// Add: add a non-standard argument to the table of non-standard arguments
//
// Arguments:
// node - a GenTree node that has a non-standard argument.
// reg - the register to assign to this node.
//
// Return Value:
// None.
//
void Add(GenTree* node, regNumber reg, NonStandardArgKind kind)
{
NonStandardArg nsa = {node, reg, kind};
args.Push(nsa);
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree* in the set of non-standard args.
//
// Arguments:
// node - a GenTree node to look for
//
// Return Value:
// The index of the non-standard argument (a non-negative, unique, stable number).
// If the node is not a non-standard argument, return -1.
//
int Find(GenTree* node)
{
for (int i = 0; i < args.Height(); i++)
{
if (node == args.Top(i).node)
{
return i;
}
}
return -1;
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree node in the non-standard arguments set. If found,
// set the register to use for the node.
//
// Arguments:
// node - a GenTree node to look for
// pReg - an OUT argument. *pReg is set to the non-standard register to use if
// 'node' is found in the non-standard argument set.
// pKind - an OUT argument. *pKind is set to the kind of the non-standard arg.
//
// Return Value:
// 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set.
// 'false' otherwise (in this case, *pReg and *pKind are unmodified).
//
bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind)
{
for (int i = 0; i < args.Height(); i++)
{
NonStandardArg& nsa = args.TopRef(i);
if (node == nsa.node)
{
*pReg = nsa.reg;
*pKind = nsa.kind;
return true;
}
}
return false;
}
//-----------------------------------------------------------------------------
// Replace: Replace the non-standard argument node at a given index. This is done when
// the original node was replaced via morphing, but we need to continue to assign a
// particular non-standard arg to it.
//
// Arguments:
// index - the index of the non-standard arg. It must exist.
// node - the new GenTree node.
//
// Return Value:
// None.
//
void Replace(int index, GenTree* node)
{
args.TopRef(index).node = node;
}
} nonStandardArgs(getAllocator(CMK_ArrayStack));
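// For example (illustrative): on x86/arm32 the first argument of the CORINFO_HELP_INIT_PINVOKE_FRAME
// helper is recorded in this table with REG_PINVOKE_FRAME, and the main argument loop below
// recognizes it via Find() and assigns it that register instead of a normal argument register.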
// Count of args. On first morph, this is counted before we've filled in the arg table.
// On remorph, we grab it from the arg table.
unsigned numArgs = 0;
// First we need to count the args
if (call->gtCallThisArg != nullptr)
{
numArgs++;
}
for (GenTreeCall::Use& use : call->Args())
{
numArgs++;
}
// Insert or mark non-standard args. These are either outside the normal calling convention, or
// arguments registers that don't follow the normal progression of argument registers in the calling
// convention (such as for the ARM64 fixed return buffer argument x8).
//
// *********** NOTE *************
// The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments
// in the implementation of fast tail call.
// *********** END NOTE *********
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM)
// The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers have a custom calling convention.
// Set the argument registers correctly here.
if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame);
}
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
#if defined(TARGET_ARM)
// A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper
// delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing
// R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs
// to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4
// correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub)
// to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details.
else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
GenTree* arg = call->gtCallThisArg->GetNode();
if (arg->OperIsLocal())
{
arg = gtClone(arg, true);
}
else
{
GenTree* tmp = fgInsertCommaFormTemp(&arg);
call->gtCallThisArg->SetNode(arg);
call->gtFlags |= GTF_ASG;
arg = tmp;
}
noway_assert(arg != nullptr);
GenTree* newArg = new (this, GT_ADDR)
GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell);
// Append newArg as the last arg
GenTreeCall::Use** insertionPoint = &call->gtCallArgs;
for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef()))
{
}
*insertionPoint = gtNewCallArgs(newArg);
numArgs++;
nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell);
}
#endif // defined(TARGET_ARM)
#if defined(TARGET_X86)
// The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the
// hi part to be in EDX. This sets the argument registers up correctly.
else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) ||
call->IsHelperCall(this, CORINFO_HELP_LRSZ))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow);
args = args->GetNext();
GenTree* arg2 = args->GetNode();
assert(arg2 != nullptr);
nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh);
}
#else // !TARGET_X86
// TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed.
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
// If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it.
//
// We don't use the fixed return buffer argument if we have the special unmanaged instance call convention.
// That convention doesn't use the fixed return buffer register.
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->HasFixedRetBufArg())
{
args = call->gtCallArgs;
assert(args != nullptr);
argx = call->gtCallArgs->GetNode();
// We don't increment numArgs here, since we already counted this argument above.
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer);
}
// We are allowed to have a Fixed Return Buffer argument combined
// with any of the remaining non-standard arguments
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->IsVirtualStub())
{
if (!call->IsTailCallViaJitHelper())
{
GenTree* stubAddrArg = fgGetStubAddrArg(call);
// And push the stub address onto the list of arguments
call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell);
}
else
{
// If it is a VSD call getting dispatched via tail call helper,
// fgMorphTailCallViaJitHelper() would materialize stub addr as an additional
// parameter added to the original arg list and hence no need to
// add as a non-standard arg.
}
}
else
#endif // !TARGET_X86
if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr))
{
assert(!call->IsUnmanaged());
GenTree* arg = call->gtCallCookie;
noway_assert(arg != nullptr);
call->gtCallCookie = nullptr;
// All architectures pass the cookie in a register.
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie);
numArgs++;
// put destination into R10/EAX
arg = gtClone(call->gtCallAddr, true);
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget);
// finally change this call to a helper call
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI);
}
#if defined(FEATURE_READYTORUN)
// For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg()
// for indirection cell address, which ZapIndirectHelperThunk expects.
// For x64/x86 we use return address to get the indirection cell by disassembling the call site.
// That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch.
// Note that we call this before we know if something will be a fast tailcall or not.
// That's ok; after making something a tailcall, we will invalidate this information
// and reconstruct it if necessary. The tailcalling decision does not change since
// this is a non-standard arg in a register.
bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke();
#if defined(TARGET_XARCH)
needsIndirectionCell &= call->IsFastTailCall();
#endif
if (needsIndirectionCell)
{
assert(call->gtEntryPoint.addr != nullptr);
size_t addrValue = (size_t)call->gtEntryPoint.addr;
GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM);
#ifdef TARGET_ARM
// Issue #xxxx : Don't attempt to CSE this constant on ARM32
//
// This constant has specific register requirements, and LSRA doesn't currently correctly
// handle them when the value is in a CSE'd local.
indirectCellAddress->SetDoNotCSE();
#endif // TARGET_ARM
// Push the stub address onto the list of arguments.
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(),
NonStandardArgKind::R2RIndirectionCell);
}
#endif
if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
assert(call->gtCallArgs != nullptr);
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* tar = args->GetNode();
nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget);
}
// Allocate the fgArgInfo for the call node;
//
call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs);
// Add the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
assert(argIndex == 0);
assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT);
assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL));
const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum);
const unsigned numRegs = 1;
const unsigned byteSize = TARGET_POINTER_SIZE;
const unsigned byteAlignment = TARGET_POINTER_SIZE;
const bool isStruct = false;
const bool isFloatHfa = false;
// This is a register argument - put it in the table.
call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment,
isStruct, isFloatHfa,
callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0)
UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr));
intArgRegNum++;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
// we skip the corresponding floating point register argument
fltArgRegNum++;
#endif // WINDOWS_AMD64_ABI
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
#ifdef TARGET_X86
// Compute the maximum number of arguments that can be passed in registers.
// For X86 we handle the varargs and unmanaged calling conventions
#ifndef UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_POP_ARGS)
{
noway_assert(intArgRegNum < MAX_REG_ARG);
// No more register arguments for varargs (CALL_POP_ARGS)
maxRegArgs = intArgRegNum;
// Add in the ret buff arg
if (callHasRetBuffArg)
maxRegArgs++;
}
#endif // UNIX_X86_ABI
if (call->IsUnmanaged())
{
noway_assert(intArgRegNum == 0);
if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL ||
call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF ||
call->gtCallArgs->GetNode()->gtOper ==
GT_NOP); // the arg was already morphed to a register (fgMorph called twice)
maxRegArgs = 1;
}
else
{
maxRegArgs = 0;
}
#ifdef UNIX_X86_ABI
// Add in the ret buff arg
if (callHasRetBuffArg &&
call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not
call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments.
maxRegArgs++;
#endif
}
#endif // TARGET_X86
/* Morph the user arguments */
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
// The ARM ABI has a concept of back-filling of floating-point argument registers, according
// to the "Procedure Call Standard for the ARM Architecture" document, especially
// section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can
// appear in a lower-numbered register than floating point argument N. That is, argument
// register allocation is not strictly increasing. To support this, we need to keep track of unused
// floating-point argument registers that we can back-fill. We only support 4-byte float and
// 8-byte double types, and one to four element HFAs composed of these types. With this, we will
// only back-fill single registers, since there is no way with these types to create
// an alignment hole greater than one register. However, there can be up to 3 back-fill slots
// available (with 16 FP argument registers). Consider this code:
//
// struct HFA { float x, y, z; }; // a three element HFA
// void bar(float a1, // passed in f0
// double a2, // passed in f2/f3; skip f1 for alignment
// HFA a3, // passed in f4/f5/f6
// double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot
// HFA a5, // passed in f10/f11/f12
// double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill
// // slots
// float a7, // passed in f1 (back-filled)
// float a8, // passed in f7 (back-filled)
// float a9, // passed in f13 (back-filled)
// float a10) // passed on the stack in [OutArg+0]
//
// Note that if we ever support FP types with larger alignment requirements, then there could
// be more than single register back-fills.
//
// Once we assign a floating-pointer register to the stack, they all must be on the stack.
// See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling
// continues only so long as no VFP CPRC has been allocated to a slot on the stack."
// We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack
// and prevent any additional floating-point arguments from going in registers.
bool anyFloatStackArgs = false;
#endif // TARGET_ARM
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG)
// Check that we have valid information about call's argument types.
// For example:
// load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte));
// load int; call(byte) -> CALL(PUTARG_TYPE int (IND int));
// etc.
if (call->callSig != nullptr)
{
CORINFO_SIG_INFO* sig = call->callSig;
const unsigned sigArgsCount = sig->numArgs;
GenTreeCall::Use* nodeArgs = call->gtCallArgs;
// It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie
// etc.
unsigned nodeArgsCount = 0;
call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult {
nodeArgsCount++;
return GenTree::VisitResult::Continue;
});
if (call->gtCallThisArg != nullptr)
{
// Handle the most common argument not included in `sig->numArgs`,
// so the following check works on more methods.
nodeArgsCount--;
}
assert(nodeArgsCount >= sigArgsCount);
if ((nodeArgsCount == sigArgsCount) &&
((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1)))
{
CORINFO_ARG_LIST_HANDLE sigArg = sig->args;
for (unsigned i = 0; i < sig->numArgs; ++i)
{
CORINFO_CLASS_HANDLE argClass;
const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass));
const var_types sigType = JITtype2varType(corType);
assert(nodeArgs != nullptr);
const GenTree* nodeArg = nodeArgs->GetNode();
assert(nodeArg != nullptr);
const var_types nodeType = nodeArg->TypeGet();
assert((nodeType == sigType) || varTypeIsStruct(sigType) ||
genTypeSize(nodeType) == genTypeSize(sigType));
sigArg = info.compCompHnd->getArgNext(sigArg);
nodeArgs = nodeArgs->GetNext();
}
assert(nodeArgs == nullptr);
}
}
#endif // DEBUG
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
argx = args->GetNode()->gtSkipPutArgType();
// Change the node to TYP_I_IMPL so we don't report GC info
// NOTE: We deferred this from the importer because of the inliner.
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// We should never have any ArgPlaceHolder nodes at this point.
assert(!argx->IsArgPlaceHolderNode());
// Setup any HFA information about 'argx'
bool isHfaArg = false;
var_types hfaType = TYP_UNDEF;
unsigned hfaSlots = 0;
bool passUsingFloatRegs;
unsigned argAlignBytes = TARGET_POINTER_SIZE;
unsigned size = 0;
unsigned byteSize = 0;
if (GlobalJitOptions::compFeatureHfa)
{
hfaType = GetHfaType(argx);
isHfaArg = varTypeIsValidHfaType(hfaType);
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
// Make sure for vararg methods isHfaArg is not true.
isHfaArg = callIsVararg ? false : isHfaArg;
}
#endif // defined(TARGET_ARM64)
if (isHfaArg)
{
isHfaArg = true;
hfaSlots = GetHfaCount(argx);
// If we have a HFA struct it's possible we transition from a method that originally
// only had integer types to now start having FP types. We have to communicate this
// through this flag since LSRA later on will use this flag to determine whether
// or not to track the FP register set.
//
compFloatingPointUsed = true;
}
}
const bool isFloatHfa = (hfaType == TYP_FLOAT);
#ifdef TARGET_ARM
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP;
bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG);
// We don't use the "size" return value from InferOpSizeAlign().
codeGen->InferOpSizeAlign(argx, &argAlignBytes);
argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE);
if (argAlignBytes == 2 * TARGET_POINTER_SIZE)
{
if (passUsingFloatRegs)
{
if (fltArgRegNum % 2 == 1)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
fltArgRegNum++;
}
}
else if (passUsingIntRegs)
{
if (intArgRegNum % 2 == 1)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
intArgRegNum++;
}
}
#if defined(DEBUG)
if (argSlots % 2 == 1)
{
argSlots++;
}
#endif
}
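// For example (ARM, illustrative): for f(int a, long b), 'a' is passed in r0 and 'b' needs
// 8-byte alignment, so r1 is skipped (recorded in argSkippedRegMask) and 'b' goes in r2/r3.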
#elif defined(TARGET_ARM64)
assert(!callIsVararg || !isHfaArg);
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx));
#elif defined(TARGET_AMD64)
passUsingFloatRegs = varTypeIsFloating(argx);
#elif defined(TARGET_X86)
passUsingFloatRegs = false;
#else
#error Unsupported or unset target architecture
#endif // TARGET*
bool isBackFilled = false;
unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
var_types structBaseType = TYP_STRUCT;
unsigned structSize = 0;
bool passStructByRef = false;
bool isStructArg;
GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */);
//
// Figure out the size of the argument. This is either in number of registers, or number of
// TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and
// the stack.
//
isStructArg = varTypeIsStruct(argx);
CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
if (isStructArg)
{
objClass = gtGetStructHandle(argx);
if (argx->TypeGet() == TYP_STRUCT)
{
// For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY
switch (actualArg->OperGet())
{
case GT_OBJ:
structSize = actualArg->AsObj()->GetLayout()->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
break;
case GT_LCL_VAR:
structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize;
break;
case GT_MKREFANY:
structSize = info.compCompHnd->getClassSize(objClass);
break;
default:
BADCODE("illegal argument tree in fgInitArgInfo");
break;
}
}
else
{
structSize = genTypeSize(argx);
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
}
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
else
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
}
#else // !UNIX_AMD64_ABI
size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot'
if (!isStructArg)
{
byteSize = genTypeSize(argx);
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64)
if (isStructArg)
{
if (isHfaArg)
{
// HFA structs are passed by value in multiple registers.
// The "size" in registers may differ from the size in pointer-sized units.
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx);
size = GetHfaCount(structHnd);
byteSize = info.compCompHnd->getClassSize(structHnd);
}
else
{
// Structs are either passed in 1 or 2 (64-bit) slots.
// Structs that are the size of 2 pointers are passed by value in multiple registers,
// if sufficient registers are available.
// Structs that are larger than 2 pointers (except for HFAs) are passed by
// reference (to a copy)
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
if (size > 2)
{
size = 1;
}
}
// Note that there are some additional rules for multireg structs.
// (i.e., they cannot be split between registers and the stack.)
}
else
{
size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
#elif defined(TARGET_ARM) || defined(TARGET_X86)
if (isStructArg)
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
}
else
{
// The typical case.
// Long/double type argument(s) will be modified as needed in Lowering.
size = genTypeStSz(argx->gtType);
byteSize = genTypeSize(argx);
}
#else
#error Unsupported or unset target architecture
#endif // TARGET_XXX
if (isStructArg)
{
assert(argx == args->GetNode());
assert(structSize != 0);
structPassingKind howToPassStruct;
structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize);
passStructByRef = (howToPassStruct == SPK_ByReference);
if (howToPassStruct == SPK_ByReference)
{
byteSize = TARGET_POINTER_SIZE;
}
else
{
byteSize = structSize;
}
if (howToPassStruct == SPK_PrimitiveType)
{
#ifdef TARGET_ARM
// TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct,
// or for a struct of two floats. This causes the struct to be address-taken.
if (structBaseType == TYP_DOUBLE)
{
size = 2;
}
else
#endif // TARGET_ARM
{
size = 1;
}
}
else if (passStructByRef)
{
size = 1;
}
}
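// For example (illustrative): on ARM64 a 24-byte non-HFA struct is passed by reference to a copy
// (SPK_ByReference), so 'size' is 1 and 'byteSize' is TARGET_POINTER_SIZE, while a struct that
// fits in a single register is retyped as a primitive (SPK_PrimitiveType) with 'size' 1.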
const var_types argType = args->GetNode()->TypeGet();
if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
{
byteSize = genTypeSize(argType);
}
// The 'size' value must have been set by now. (The original value of zero is an invalid value.)
assert(size != 0);
assert(byteSize != 0);
if (compMacOsArm64Abi())
{
// Arm64 Apple has a special ABI for passing small size arguments on stack,
// bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc.
// It means passing 8 1-byte arguments on stack can take as small as 8 bytes.
argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa);
}
//
// Figure out if the argument will be passed in a register.
//
bool isRegArg = false;
NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None;
regNumber nonStdRegNum = REG_NA;
if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
#elif defined(TARGET_X86)
|| (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
)
{
#ifdef TARGET_ARM
if (passUsingFloatRegs)
{
// First, see if it can be back-filled
if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
(fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
(size == 1)) // The size to back-fill is one float register
{
// Back-fill the register.
isBackFilled = true;
regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
fltArgSkippedRegMask &=
~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
}
// Does the entire float, double, or HFA fit in the FP arg registers?
// Check if the last register needed is still in the argument register range.
isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG;
if (!isRegArg)
{
anyFloatStackArgs = true;
}
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
#elif defined(TARGET_ARM64)
if (passUsingFloatRegs)
{
// Check if the last register needed is still in the fp argument register range.
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;
// Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers?
if (isHfaArg && !isRegArg)
{
// recompute the 'size' so that it represents the number of stack slots rather than the number of
// registers
//
unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
size = roundupSize / TARGET_POINTER_SIZE;
// We also must update fltArgRegNum so that we no longer try to
// allocate any new floating point registers for args
// This prevents us from backfilling a subsequent arg into d7
//
fltArgRegNum = MAX_FLOAT_REG_ARG;
}
}
else
{
// Check if the last register needed is still in the int argument register range.
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
// Did we run out of registers when we had a 16-byte struct (size == 2)?
// (i.e we only have one register remaining but we needed two registers to pass this arg)
// This prevents us from backfilling a subsequent arg into x7
//
if (!isRegArg && (size > 1))
{
// Arm64 windows native varargs allows splitting a 16 byte struct between stack
// and the last general purpose register.
if (TargetOS::IsWindows && callIsVararg)
{
// Override the decision and force a split.
isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs;
}
else
{
// We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
intArgRegNum = maxRegArgs;
}
}
}
#else // not TARGET_ARM or TARGET_ARM64
#if defined(UNIX_AMD64_ABI)
// Here a struct can be passed in register following the classifications of its members and size.
// Now make sure there are actually enough registers to do so.
if (isStructArg)
{
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
structIntRegs++;
}
else if (structDesc.IsSseSlot(i))
{
structFloatRegs++;
}
}
isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) &&
((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
}
else
{
if (passUsingFloatRegs)
{
isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG;
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
}
#else // !defined(UNIX_AMD64_ABI)
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
#endif // !defined(UNIX_AMD64_ABI)
#endif // TARGET_ARM
}
else
{
isRegArg = false;
}
// If there are nonstandard args (outside the calling convention) they were inserted above
// and noted in a table so we can recognize them here and build their argInfo.
//
// They should not affect the placement of any other args or stack space required.
// Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls.
bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind);
if (isNonStandard)
{
isRegArg = (nonStdRegNum != REG_STK);
}
else if (call->IsTailCallViaJitHelper())
{
// We have already (before calling fgMorphArgs()) appended the 4 special args
// required by the x86 tailcall helper. These args are required to go on the
// stack. Force them to the stack here.
assert(numArgs >= 4);
if (argIndex >= numArgs - 4)
{
isRegArg = false;
}
}
// Now we know if the argument goes in registers or not and how big it is.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
// If we ever allocate a floating point argument to the stack, then all
// subsequent HFA/float/double arguments go on the stack.
if (!isRegArg && passUsingFloatRegs)
{
for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
}
}
// If we think we're going to split a struct between integer registers and the stack, check to
// see if we've already assigned a floating-point arg to the stack.
if (isRegArg && // We decided above to use a register for the argument
!passUsingFloatRegs && // We're using integer registers
(intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack
anyFloatStackArgs) // We've already used the stack for a floating-point argument
{
isRegArg = false; // Change our mind; don't pass this struct partially in registers
// Skip the rest of the integer argument registers
for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
}
}
#endif // TARGET_ARM
// Now create the fgArgTabEntry.
fgArgTabEntry* newArgEntry;
if (isRegArg)
{
regNumber nextRegNum = REG_STK;
#if defined(UNIX_AMD64_ABI)
regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
if (isNonStandard)
{
nextRegNum = nonStdRegNum;
}
#if defined(UNIX_AMD64_ABI)
else if (isStructArg && structDesc.passedInRegisters)
{
// It is a struct passed in registers. Assign the next available register.
assert((structDesc.eightByteCount <= 2) && "Too many eightbytes.");
regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum};
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
*nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs);
++structIntRegs;
}
else if (structDesc.IsSseSlot(i))
{
*nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs);
++structFloatRegs;
}
}
}
#endif // defined(UNIX_AMD64_ABI)
else
{
// fill in or update the argInfo table
nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
: genMapIntRegArgNumToRegNum(intArgRegNum);
}
#ifdef TARGET_AMD64
#ifndef UNIX_AMD64_ABI
assert(size == 1);
#endif
#endif
// This is a register argument - put it in the table
newArgEntry =
call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum)
UNIX_AMD64_ABI_ONLY_ARG(structIntRegs)
UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs)
UNIX_AMD64_ABI_ONLY_ARG(&structDesc));
newArgEntry->SetIsBackFilled(isBackFilled);
// Set up the next intArgRegNum and fltArgRegNum values.
if (!isBackFilled)
{
#if defined(UNIX_AMD64_ABI)
if (isStructArg)
{
// For this case, we've already set the regNums in the argTabEntry
intArgRegNum += structIntRegs;
fltArgRegNum += structFloatRegs;
}
else
#endif // defined(UNIX_AMD64_ABI)
{
if (!isNonStandard)
{
#if FEATURE_ARG_SPLIT
// Check for a split (partially enregistered) struct
if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG))
{
// This indicates a partial enregistration of a struct type
assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() ||
(argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG)));
unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum;
assert((unsigned char)numRegsPartial == numRegsPartial);
call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial);
}
#endif // FEATURE_ARG_SPLIT
if (passUsingFloatRegs)
{
fltArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass a floating point register argument
// we skip the corresponding integer register argument
intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG);
#endif // WINDOWS_AMD64_ABI
// No supported architecture supports partial structs using float registers.
assert(fltArgRegNum <= MAX_FLOAT_REG_ARG);
}
else
{
// Increment intArgRegNum by 'size' registers
intArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG);
#endif // WINDOWS_AMD64_ABI
}
}
}
}
}
else // We have an argument that is not passed in a register
{
// This is a stack argument - put it in the table
newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg);
#ifdef UNIX_AMD64_ABI
// TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs).
if (structDesc.passedInRegisters)
{
newArgEntry->structDesc.CopyFrom(structDesc);
}
#endif
}
newArgEntry->nonStandardArgKind = nonStandardArgKind;
if (GlobalJitOptions::compFeatureHfa)
{
if (isHfaArg)
{
newArgEntry->SetHfaType(hfaType, hfaSlots);
}
}
newArgEntry->SetMultiRegNums();
noway_assert(newArgEntry != nullptr);
if (newArgEntry->isStruct)
{
newArgEntry->passedByRef = passStructByRef;
newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType;
}
else
{
newArgEntry->argType = argx->TypeGet();
}
DEBUG_ARG_SLOTS_ONLY(argSlots += size;)
} // end foreach argument loop
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
}
//------------------------------------------------------------------------
// fgMorphArgs: Walk and transform (morph) the arguments of a call
//
// Arguments:
// callNode - the call for which we are doing the argument morphing
//
// Return Value:
// Like most morph methods, this method returns the morphed node,
// though in this case there are currently no scenarios where the
// node itself is re-created.
//
// Notes:
// This calls fgInitArgInfo to create the 'fgArgInfo' for the call.
// If it has already been created, that method will simply return.
//
// This method changes the state of the call node. It uses the existence
// of gtCallLateArgs (the late arguments list) to determine if it has
// already done the first round of morphing.
//
// The first time it is called (i.e. during global morphing), this method
// computes the "late arguments". This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, are doing the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, the argument and any earlier
// arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
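//
// For example (illustrative only), given a call such as Foo(x + 1, Bar(y)) on a
// FEATURE_FIXED_OUT_ARGS target, the nested call Bar(y) is evaluated into a temp
// in the early list, and the late arg list then stores the temp (and x + 1) into
// their final registers or outgoing stack slots.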
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
GenTreeFlags flagsSummary = GTF_EMPTY;
unsigned argIndex = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool reMorphing = call->AreArgsComplete();
// Set up the fgArgInfo.
fgInitArgInfo(call);
JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));
// If we are remorphing, process the late arguments (which were determined by a previous caller).
if (reMorphing)
{
for (GenTreeCall::Use& use : call->LateArgs())
{
use.SetNode(fgMorphTree(use.GetNode()));
flagsSummary |= use.GetNode()->gtFlags;
}
assert(call->fgArgInfo != nullptr);
}
call->fgArgInfo->RemorphReset();
// First we morph the argument subtrees ('this' pointer, arguments, etc.).
// During the first call to fgMorphArgs we also record the
// information about late arguments we have in 'fgArgInfo'.
// This information is used later to construct the gtCallLateArgs.
// Process the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
argx = fgMorphTree(argx);
call->gtCallThisArg->SetNode(argx);
// This is a register argument - possibly update it in the table.
call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
{
if (!argx->OperIsLocal())
{
thisArgEntry->needTmp = true;
call->fgArgInfo->SetNeedsTemps();
}
}
assert(argIndex == 0);
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
// Note that this name is a bit of a misnomer - it indicates that there are struct args
// that occupy more than a single slot that are passed by value (not necessarily in regs).
bool hasMultiregStructArgs = false;
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
GenTree** parentArgx = &args->NodeRef();
fgArgTabEntry* argEntry = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);
// Morph the arg node, and update the parent and argEntry pointers.
argx = *parentArgx;
argx = fgMorphTree(argx);
*parentArgx = argx;
assert(argx == args->GetNode());
DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();)
CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE)
{
if (argSlots % 2 == 1)
{
argSlots++;
}
}
}
#endif // DEBUG_ARG_SLOTS
if (argEntry->isNonStandard() && argEntry->isPassedInRegisters())
{
// We need to update the node field for this nonStandard arg here
// as it may have been changed by the call to fgMorphTree.
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
continue;
}
DEBUG_ARG_SLOTS_ASSERT(size != 0);
DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();)
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// Get information about this argument.
var_types hfaType = argEntry->GetHfaType();
bool isHfaArg = (hfaType != TYP_UNDEF);
bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters();
unsigned structSize = 0;
// Struct arguments may be morphed into a node that is not a struct type.
// In such case the fgArgTabEntry keeps track of whether the original node (before morphing)
// was a struct and the struct classification.
bool isStructArg = argEntry->isStruct;
GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/);
if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE))
{
CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj);
unsigned originalSize;
if (argObj->TypeGet() == TYP_STRUCT)
{
if (argObj->OperIs(GT_OBJ))
{
// Get the size off the OBJ node.
originalSize = argObj->AsObj()->GetLayout()->GetSize();
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
else
{
// We have a BADCODE assert for this in fgInitArgInfo.
assert(argObj->OperIs(GT_LCL_VAR));
originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize;
}
}
else
{
originalSize = genTypeSize(argx);
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
var_types structBaseType = argEntry->argType;
// First, handle the case where the argument is passed by reference.
if (argEntry->passedByRef)
{
DEBUG_ARG_SLOTS_ASSERT(size == 1);
copyBlkClass = objClass;
#ifdef UNIX_AMD64_ABI
assert(!"Structs are not passed by reference on x64/ux");
#endif // UNIX_AMD64_ABI
}
else // This is passed by value.
{
// Check to see if we can transform this into load of a primitive type.
// 'size' must be the number of pointer sized items
DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE);
structSize = originalSize;
unsigned passingSize = originalSize;
// Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size.
// When it can do this is platform-dependent:
// - In general, it can be done for power of 2 structs that fit in a single register.
// - For ARM and ARM64 it must also be a non-HFA struct, or have a single field.
// - This is irrelevant for X86, since structs are always passed by value on the stack.
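// For example (illustrative), an 8-byte struct argument passed in a single register as
// OBJ(ADDR(LCL_VAR V03)) can become IND(ADDR(LCL_VAR V03)), which the code below then
// folds to the local itself (retyped via GT_LCL_FLD when the types don't match).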
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj);
bool canTransform = false;
if (structBaseType != TYP_STRUCT)
{
if (isPow2(passingSize))
{
canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType())));
}
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
// For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can
// only transform in that case if the arg is a local.
// TODO-CQ: This transformation should be applicable in general, not just for the ARM64
// or UNIX_AMD64_ABI cases where they will be passed in registers.
else
{
canTransform = (lclVar != nullptr);
passingSize = genTypeSize(structBaseType);
}
#endif // TARGET_ARM64 || UNIX_AMD64_ABI
}
if (!canTransform)
{
#if defined(TARGET_AMD64)
#ifndef UNIX_AMD64_ABI
// On Windows structs are always copied and passed by reference (handled above) unless they are
// passed by value in a single register.
assert(size == 1);
copyBlkClass = objClass;
#else // UNIX_AMD64_ABI
// On Unix, structs are always passed by value.
// We only need a copy if we have one of the following:
// - The sizes don't match for a non-lclVar argument.
// - We have a known struct type (e.g. SIMD) that requires multiple registers.
// TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not
// actually passed in registers.
if (argEntry->isPassedInRegisters())
{
if (argObj->OperIs(GT_OBJ))
{
if (passingSize != structSize)
{
copyBlkClass = objClass;
}
}
else if (lclVar == nullptr)
{
// This should only be the case of a value directly producing a known struct type.
assert(argObj->TypeGet() != TYP_STRUCT);
if (argEntry->numRegs > 1)
{
copyBlkClass = objClass;
}
}
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64)
if ((passingSize != structSize) && (lclVar == nullptr))
{
copyBlkClass = objClass;
}
#endif
#ifdef TARGET_ARM
// TODO-1stClassStructs: Unify these conditions across targets.
if (((lclVar != nullptr) &&
(lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) ||
((argObj->OperIs(GT_OBJ)) && (passingSize != structSize)))
{
copyBlkClass = objClass;
}
if (structSize < TARGET_POINTER_SIZE)
{
copyBlkClass = objClass;
}
#endif // TARGET_ARM
}
else
{
// We have a struct argument that fits into a register, and it is either a power of 2,
// or a local.
// Change our argument, as needed, into a value of the appropriate type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2)));
#else
DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) &&
size == (genTypeSize(structBaseType) / REGSIZE_BYTES)));
#endif
assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize));
if (argObj->OperIs(GT_OBJ))
{
argObj->ChangeOper(GT_IND);
// Now see if we can fold *(&X) into X
if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR)
{
GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1;
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE);
DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(argObj); // GT_IND
argObj = temp;
*parentArgx = temp;
argx = temp;
}
}
if (argObj->gtOper == GT_LCL_VAR)
{
unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
if (varDsc->lvFieldCnt == 1)
{
// get the first and only promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize)
{
// we will use the first and only promoted field
argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart);
if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) &&
(genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
{
// Just use the existing field's type
argObj->gtType = fieldVarDsc->TypeGet();
}
else
{
// Can't use the existing field's type, so use GT_LCL_FLD to swizzle
// to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()));
assert(copyBlkClass == NO_CLASS_HANDLE);
}
else
{
// use GT_LCL_FLD to swizzle the single field struct to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// The struct fits into a single register, but it has been promoted into its
// constituent fields, and so we have to re-assemble it
copyBlkClass = objClass;
}
}
else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType))
{
// Not a promoted struct, so just swizzle the type by using GT_LCL_FLD
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// Not a GT_LCL_VAR, so we can just change the type on the node
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()) ||
((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType)));
}
#if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH)
// TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in
// `genPutStructArgStk` for xarch like we did it for Arm/Arm64.
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if (isHfaArg && passUsingFloatRegs)
{
}
else if (structBaseType == TYP_STRUCT)
{
// If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
// we must copyblk to a temp before doing the obj to avoid
// the obj reading memory past the end of the valuetype
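// For example (illustrative), a 6-byte struct rounds up to 8 bytes, so reading it
// through a GT_OBJ could touch 2 bytes past the end of the value; copying it to a
// padded temp avoids that (unless the source is already a suitably padded local).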
CLANG_FORMAT_COMMENT_ANCHOR;
if (roundupSize > originalSize)
{
copyBlkClass = objClass;
// There are a few special cases where we can omit using a CopyBlk
// where we normally would need to use one.
if (argObj->OperIs(GT_OBJ) &&
argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar?
{
copyBlkClass = NO_CLASS_HANDLE;
}
}
}
#endif // !UNIX_AMD64_ABI && !TARGET_ARMARCH
}
}
if (argEntry->isPassedInRegisters())
{
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
}
else
{
call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing);
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
fgMakeOutgoingStructArgCopy(call, args, copyBlkClass);
}
if (argx->gtOper == GT_MKREFANY)
{
// 'Lower' the MKREFANY tree and insert it.
noway_assert(!reMorphing);
#ifdef TARGET_X86
// Build the mkrefany as a GT_FIELD_LIST
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF);
fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
#else // !TARGET_X86
// Get a new temp
// Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany
unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument"));
lvaSetStruct(tmp, impGetRefAnyClass(), false);
// Build the mkrefany as a comma node:
// (tmp.ptr=argx),(tmp.type=handle)
GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr);
GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type);
destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
destPtrSlot->gtFlags |= GTF_VAR_DEF;
destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()));
destTypeSlot->gtFlags |= GTF_VAR_DEF;
GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1);
GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2);
GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot);
// Change the expression to "(tmp=val)"
args->SetNode(asg);
// EvalArgsToTemps will cause tmp to actually get loaded as the argument
call->fgArgInfo->EvalToTmp(argEntry, tmp, asg);
lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE));
#endif // !TARGET_X86
}
#if FEATURE_MULTIREG_ARGS
if (isStructArg)
{
if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) ||
(isHfaArg && argx->TypeGet() == TYP_STRUCT))
{
hasMultiregStructArgs = true;
}
}
#ifdef TARGET_ARM
else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE))
{
assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2));
}
#endif
else
{
// We must have exactly one register or slot.
assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) ||
((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1)));
}
#endif
#if defined(TARGET_X86)
if (isStructArg)
{
GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx);
if ((lclNode != nullptr) &&
(lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// Make a GT_FIELD_LIST of the field lclVars.
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon();
LclVarDsc* varDsc = lvaGetDesc(lcl);
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
for (unsigned fieldLclNum = varDsc->lvFieldLclStart;
fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* fieldLcl;
if (fieldLclNum == varDsc->lvFieldLclStart)
{
lcl->SetLclNum(fieldLclNum);
lcl->SetOperResetFlags(GT_LCL_VAR);
lcl->gtType = fieldVarDsc->TypeGet();
fieldLcl = lcl;
}
else
{
fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
}
fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
}
}
}
#endif // TARGET_X86
flagsSummary |= args->GetNode()->gtFlags;
} // end foreach argument loop
if (!reMorphing)
{
call->fgArgInfo->ArgsComplete();
}
/* Process the function address, if indirect call */
if (call->gtCallType == CT_INDIRECT)
{
call->gtCallAddr = fgMorphTree(call->gtCallAddr);
// Const CSE may create an assignment node here
flagsSummary |= call->gtCallAddr->gtFlags;
}
#if FEATURE_FIXED_OUT_ARGS
// Record the outgoing argument size. If the call is a fast tail
// call, it will setup its arguments in incoming arg area instead
// of the out-going arg area, so we don't need to track the
// outgoing arg size.
if (!call->IsFastTailCall())
{
#if defined(UNIX_AMD64_ABI)
// This is currently required for the UNIX ABI to work correctly.
opts.compNeedToAlignFrame = true;
#endif // UNIX_AMD64_ABI
const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset());
#if defined(DEBUG_ARG_SLOTS)
unsigned preallocatedArgCount = 0;
if (!compMacOsArm64Abi())
{
preallocatedArgCount = call->fgArgInfo->GetNextSlotNum();
assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES);
}
#endif
call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL));
#ifdef DEBUG
if (verbose)
{
const fgArgInfo* argInfo = call->fgArgInfo;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, "
"outgoingArgSpaceSize=%d\n",
argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
else
{
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
#else
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
#endif
}
#endif
}
#endif // FEATURE_FIXED_OUT_ARGS
// Clear the ASG and EXCEPT (if possible) flags on the call node
call->gtFlags &= ~GTF_ASG;
if (!call->OperMayThrow(this))
{
call->gtFlags &= ~GTF_EXCEPT;
}
// Union in the side effect flags from the call's operands
call->gtFlags |= flagsSummary & GTF_ALL_EFFECT;
// If we are remorphing or don't have any register arguments or other arguments that need
// temps, then we don't need to call SortArgs() and EvalArgsToTemps().
//
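// SortArgs() determines an evaluation order for the arguments, and EvalArgsToTemps()
// builds the gtCallLateArgs list, introducing temps and placeholder nodes in the
// early list where needed.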
if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps()))
{
// Do the 'defer or eval to temp' analysis.
call->fgArgInfo->SortArgs();
call->fgArgInfo->EvalArgsToTemps();
}
if (hasMultiregStructArgs)
{
fgMorphMultiregStructArgs(call);
}
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
return call;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
// call fgMorphMultiregStructArg on each of them.
//
// Arguments:
// call : a GenTreeCall node that has one or more TYP_STRUCT arguments.
//
// Notes:
// We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types.
// It will ensure that the struct arguments are in the correct form.
// If this method fails to find any TYP_STRUCT arguments it will assert.
//
void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
{
bool foundStructArg = false;
GenTreeFlags flagsSummary = GTF_EMPTY;
#ifdef TARGET_X86
assert(!"Logic error: no MultiregStructArgs for X86");
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI");
#endif
for (GenTreeCall::Use& use : call->Args())
{
// For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
// For such late args the gtCallArgs list contains the setup arg node (evaluating the arg).
// The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
// between the nodes in both lists. If the arg is not a late arg, fgArgEntry->node points to itself;
// otherwise it points to the node in the late args list.
bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode());
assert(fgEntryPtr != nullptr);
GenTree* argx = fgEntryPtr->GetNode();
GenTreeCall::Use* lateUse = nullptr;
GenTree* lateNode = nullptr;
if (isLateArg)
{
for (GenTreeCall::Use& lateArgUse : call->LateArgs())
{
GenTree* argNode = lateArgUse.GetNode();
if (argx == argNode)
{
lateUse = &lateArgUse;
lateNode = argNode;
break;
}
}
assert((lateUse != nullptr) && (lateNode != nullptr));
}
if (!fgEntryPtr->isStruct)
{
continue;
}
unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber());
if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT))
{
foundStructArg = true;
if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST))
{
if (fgEntryPtr->IsHfaRegArg())
{
var_types hfaType = fgEntryPtr->GetHfaType();
unsigned structSize;
if (argx->OperIs(GT_OBJ))
{
structSize = argx->AsObj()->GetLayout()->GetSize();
}
else if (varTypeIsSIMD(argx))
{
structSize = genTypeSize(argx);
}
else
{
assert(argx->OperIs(GT_LCL_VAR));
structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize;
}
assert(structSize > 0);
if (structSize == genTypeSize(hfaType))
{
if (argx->OperIs(GT_OBJ))
{
argx->SetOper(GT_IND);
}
argx->gtType = hfaType;
}
}
GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr);
// Did we replace 'argx' with a new tree?
if (newArgx != argx)
{
// link the new arg node into either the late arg list or the gtCallArgs list
if (isLateArg)
{
lateUse->SetNode(newArgx);
}
else
{
use.SetNode(newArgx);
}
assert(fgEntryPtr->GetNode() == newArgx);
}
}
}
}
// We should only call this method when we actually have one or more multireg struct args
assert(foundStructArg);
// Update the flags
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list,
// morph the argument as needed to be passed correctly.
//
// Arguments:
// arg - A GenTree node containing a TYP_STRUCT arg
// fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
// The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
// If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
// stack are marked as doNotEnregister, and then we return.
//
// If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
// which is only used for struct arguments.
//
// If arg is a LclVar we check if it is struct promoted, has the right number of fields,
// and whether they are at the appropriate offsets; if so, we use the struct promoted fields
// in the GT_FIELD_LIST nodes that we create.
// If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements,
// we use a set of GT_LCL_FLD nodes to access the various portions of the struct;
// this also forces the struct to be stack allocated into the local frame.
// For the GT_OBJ case we clone the address expression and generate two (or more)
// indirections.
// Currently the implementation handles ARM64/ARM and Unix AMD64; it will NYI for other architectures.
//
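// For example (illustrative), a 16-byte struct passed in two registers as
// OBJ(ADDR(LCL_VAR V01)), where V01 is not promoted, becomes
// FIELD_LIST(LCL_FLD V01 [+0], LCL_FLD V01 [+8]) and V01 is marked DoNotEnregister.
//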
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
assert(varTypeIsStruct(arg->TypeGet()));
#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI)
NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif
#ifdef TARGET_ARM
if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
(!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
{
GenTreeLclVarCommon* lcl = nullptr;
GenTree* actualArg = arg->gtEffectiveVal();
if (actualArg->OperGet() == GT_OBJ)
{
if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
{
lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
}
}
else if (actualArg->OperGet() == GT_LCL_VAR)
{
lcl = actualArg->AsLclVarCommon();
}
if (lcl != nullptr)
{
if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
{
arg = fgMorphLclArgToFieldlist(lcl);
}
else if (arg->TypeGet() == TYP_STRUCT)
{
// If this is a non-register struct, it must be referenced from memory.
if (!actualArg->OperIs(GT_OBJ))
{
// Create an Obj of the temp to use it as a call argument.
arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
}
// Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg));
}
}
return arg;
}
#if FEATURE_MULTIREG_ARGS
// Examine 'arg' and setup argValue objClass and structSize
//
const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg);
GenTree* argValue = arg; // normally argValue will be arg, but see right below
unsigned structSize = 0;
if (arg->TypeGet() != TYP_STRUCT)
{
structSize = genTypeSize(arg->TypeGet());
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else if (arg->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = arg->AsObj();
const ClassLayout* objLayout = argObj->GetLayout();
structSize = objLayout->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
// If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR.
GenTree* op1 = argObj->gtOp1;
if (op1->OperGet() == GT_ADDR)
{
GenTree* underlyingTree = op1->AsOp()->gtOp1;
// Only update to the same type.
if (underlyingTree->OperIs(GT_LCL_VAR))
{
const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar());
if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout))
{
argValue = underlyingTree;
}
}
}
}
else if (arg->OperGet() == GT_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon());
structSize = varDsc->lvExactSize;
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else
{
structSize = info.compCompHnd->getClassSize(objClass);
}
var_types hfaType = TYP_UNDEF;
var_types elemType = TYP_UNDEF;
unsigned elemCount = 0;
unsigned elemSize = 0;
var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0
hfaType = fgEntryPtr->GetHfaType();
if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters())
{
elemType = hfaType;
elemSize = genTypeSize(elemType);
elemCount = structSize / elemSize;
assert(elemSize * elemCount == structSize);
for (unsigned inx = 0; inx < elemCount; inx++)
{
type[inx] = elemType;
}
}
else
{
assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE);
BYTE gcPtrs[MAX_ARG_REG_COUNT];
elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
for (unsigned inx = 0; inx < elemCount; inx++)
{
#ifdef UNIX_AMD64_ABI
if (gcPtrs[inx] == TYPE_GC_NONE)
{
type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx],
fgEntryPtr->structDesc.eightByteSizes[inx]);
}
else
#endif // UNIX_AMD64_ABI
{
type[inx] = getJitGCType(gcPtrs[inx]);
}
}
#ifndef UNIX_AMD64_ABI
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
elemSize = TARGET_POINTER_SIZE;
// We can safely widen this to aligned bytes since we are loading from
// a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and
// lives in the stack frame or will be a promoted field.
//
structSize = elemCount * TARGET_POINTER_SIZE;
}
else // we must have a GT_OBJ
{
assert(argValue->OperGet() == GT_OBJ);
// We need to load the struct from an arbitrary address
// and we can't read past the end of the structSize
// We adjust the last load type here
//
unsigned remainingBytes = structSize % TARGET_POINTER_SIZE;
unsigned lastElem = elemCount - 1;
if (remainingBytes != 0)
{
switch (remainingBytes)
{
case 1:
type[lastElem] = TYP_BYTE;
break;
case 2:
type[lastElem] = TYP_SHORT;
break;
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
case 4:
type[lastElem] = TYP_INT;
break;
#endif // (TARGET_ARM64) || (UNIX_AMD64_ABI)
default:
noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
break;
}
}
}
#endif // !UNIX_AMD64_ABI
}
// We should still have a TYP_STRUCT
assert(varTypeIsStruct(argValue->TypeGet()));
GenTreeFieldList* newArg = nullptr;
// Are we passing a struct LclVar?
//
if (argValue->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
// At this point any TYP_STRUCT LclVar must be an aligned struct
// or an HFA struct, both of which are passed by value.
//
assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa());
varDsc->lvIsMultiRegArg = true;
#ifdef DEBUG
if (verbose)
{
JITDUMP("Multireg struct argument V%02u : ", varNum);
fgEntryPtr->Dump();
}
#endif // DEBUG
#ifndef UNIX_AMD64_ABI
// This local variable must match the layout of the 'objClass' type exactly
if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
// We have a HFA struct.
noway_assert(elemType == varDsc->GetHfaType());
noway_assert(elemSize == genTypeSize(elemType));
noway_assert(elemCount == (varDsc->lvExactSize / elemSize));
noway_assert(elemSize * elemCount == varDsc->lvExactSize);
for (unsigned inx = 0; (inx < elemCount); inx++)
{
noway_assert(type[inx] == elemType);
}
}
else
{
#if defined(TARGET_ARM64)
// We must have a 16-byte struct (non-HFA)
noway_assert(elemCount == 2);
#elif defined(TARGET_ARM)
noway_assert(elemCount <= 4);
#endif
for (unsigned inx = 0; inx < elemCount; inx++)
{
var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx);
// We set up the type[inx] value above using the GC info from 'objClass'
// This GT_LCL_VAR must have the same GC layout info
//
if (varTypeIsGC(currentGcLayoutType))
{
noway_assert(type[inx] == currentGcLayoutType);
}
else
{
// We may have used a small type when we set up the type[inx] values above
// We can safely widen this to TYP_I_IMPL
type[inx] = TYP_I_IMPL;
}
}
}
if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
bool canMorphToFieldList = true;
for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize)
{
const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset);
if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum)))
{
canMorphToFieldList = false;
break;
}
}
if (canMorphToFieldList)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
else
#endif // !UNIX_AMD64_ABI
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
// Is this LclVar a promoted struct with exactly 2 fields?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa())
{
// See if we have two promoted fields that start at offset 0 and 8?
unsigned loVarNum = lvaGetFieldLocal(varDsc, 0);
unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE);
// Did we find the promoted fields at the necessary offsets?
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM))
{
LclVarDsc* loVarDsc = lvaGetDesc(loVarNum);
LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum);
var_types loType = loVarDsc->lvType;
var_types hiType = hiVarDsc->lvType;
if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) ||
(varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1))))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
}
else
{
// We can use the struct promoted field as the two arguments
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr))
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType);
newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLDs nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#elif defined(TARGET_ARM)
// Is this LclVar a promoted struct with exactly the same size?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa())
{
// See if we have promoted fields?
unsigned varNums[4];
bool hasBadVarNum = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx);
if (varNums[inx] == BAD_VAR_NUM)
{
hasBadVarNum = true;
break;
}
}
// Did we find the promoted fields at the necessary offsets?
if (!hasBadVarNum)
{
LclVarDsc* varDscs[4];
var_types varType[4];
bool varIsFloat = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varDscs[inx] = lvaGetDesc(varNums[inx]);
varType[inx] = varDscs[inx]->lvType;
if (varTypeIsFloating(varType[inx]))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the
// integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
varIsFloat = true;
break;
}
}
if (!varIsFloat)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLDs nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#endif // TARGET_ARM
}
// If we didn't set newArg to a new GT_FIELD_LIST tree
//
if (newArg == nullptr)
{
if (fgEntryPtr->GetRegNum() == REG_STK)
{
// We leave this stack passed argument alone
return arg;
}
// Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted)?
// Note that a GT_LCL_FLD could also contain a 16-byte struct or an HFA struct inside it.
//
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
unsigned baseOffset = varNode->GetLclOffs();
unsigned lastOffset = baseOffset + structSize;
// The allocated size of our LocalVar must be at least as big as lastOffset
assert(varDsc->lvSize() >= lastOffset);
if (varDsc->HasGCPtr())
{
// alignment of the baseOffset is required
noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0);
#ifndef UNIX_AMD64_ABI
noway_assert(elemSize == TARGET_POINTER_SIZE);
#endif
unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
ClassLayout* layout = varDsc->GetLayout();
for (unsigned inx = 0; (inx < elemCount); inx++)
{
// The GC information must match what we setup using 'objClass'
if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx]))
{
noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx));
}
}
}
else // this varDsc contains no GC pointers
{
for (unsigned inx = 0; inx < elemCount; inx++)
{
// The GC information must match what we setup using 'objClass'
noway_assert(!varTypeIsGC(type[inx]));
}
}
//
// We create a list of GT_LCL_FLDs nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI)
//
unsigned offset = baseOffset;
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset);
newArg->AddField(this, nextLclFld, offset, type[inx]);
offset += genTypeSize(type[inx]);
}
}
// Are we passing a GT_OBJ struct?
//
else if (argValue->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argValue->AsObj();
GenTree* baseAddr = argObj->gtOp1;
var_types addrType = baseAddr->TypeGet();
if (baseAddr->OperGet() == GT_ADDR)
{
GenTree* addrTaken = baseAddr->AsOp()->gtOp1;
if (addrTaken->IsLocal())
{
GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
// We access a non-struct type (for example, long) as a struct type.
// Make sure lclVar lives on stack to make sure its fields are accessible by address.
lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
// Create a new tree for 'arg'
// replace the existing LDOBJ(EXPR)
// with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...)
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
unsigned offset = 0;
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* curAddr = baseAddr;
if (offset != 0)
{
GenTree* baseAddrDup = gtCloneExpr(baseAddr);
noway_assert(baseAddrDup != nullptr);
curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL));
}
else
{
curAddr = baseAddr;
}
GenTree* curItem = gtNewIndir(type[inx], curAddr);
// For safety all GT_IND should have at least GT_GLOB_REF set.
curItem->gtFlags |= GTF_GLOB_REF;
newArg->AddField(this, curItem, offset, type[inx]);
offset += genTypeSize(type[inx]);
}
}
}
#ifdef DEBUG
// If we reach here we should have set newArg to something
if (newArg == nullptr)
{
gtDispTree(argValue);
assert(!"Missing case in fgMorphMultiregStructArg");
}
#endif
noway_assert(newArg != nullptr);
#ifdef DEBUG
if (verbose)
{
printf("fgMorphMultiregStructArg created tree:\n");
gtDispTree(newArg);
}
#endif
arg = newArg; // consider calling fgMorphTree(newArg);
#endif // FEATURE_MULTIREG_ARGS
return arg;
}
//------------------------------------------------------------------------
// fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields
//
// Arguments:
// lcl - The GT_LCL_VAR node we will transform
//
// Return value:
// The new GT_FIELD_LIST that we have created.
//
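// For example (illustrative), a promoted struct local V02 with fields V03 (int, offset 0)
// and V04 (float, offset 4) becomes FIELD_LIST(LCL_VAR V03 [+0], LCL_VAR V04 [+4]).
//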
GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl)
{
LclVarDsc* varDsc = lvaGetDesc(lcl);
assert(varDsc->lvPromoted);
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclNum = varDsc->lvFieldLclStart;
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (unsigned i = 0; i < fieldCount; i++)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
fieldLclNum++;
}
return fieldList;
}
//------------------------------------------------------------------------
// fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary,
// to pass to a callee.
//
// Arguments:
// call - call being processed
// args - args for the call
// copyBlkClass - class handle for the struct
//
// The arg is updated if necessary with the copy.
//
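// For example (illustrative), a by-value struct arg S is copied into a new temp V##
// via a CopyBlk that is morphed here; the call then passes V## instead of S, unless
// the copy can be skipped because this is a last use of an implicit by-ref local.
//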
void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass)
{
GenTree* argx = args->GetNode();
noway_assert(argx->gtOper != GT_MKREFANY);
fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx);
// If we're optimizing, see if we can avoid making a copy.
//
// We don't need a copy if this is the last use of an implicit by-ref local.
//
if (opts.OptimizationEnabled())
{
GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
const unsigned varNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(varNum);
const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
// We don't have liveness so we rely on other indications of last use.
//
// We handle these cases:
//
// * (must not copy) If the call is a tail call, the use is a last use.
// We must skip the copy if we have a fast tail call.
//
// * (may not copy) if the call is noreturn, the use is a last use.
// We also check for just one reference here as we are not doing
// alias analysis of the call's parameters, or checking if the call
// site is not within some try region.
//
// * (may not copy) if there is exactly one use of the local in the method,
// and the call is not in a loop, this is a last use.
//
// fgMightHaveLoop() is expensive; check it last, only if necessary.
//
if (call->IsTailCall() || //
((totalAppearances == 1) && call->IsNoReturn()) || //
((totalAppearances == 1) && !fgMightHaveLoop()))
{
args->SetNode(lcl);
assert(argEntry->GetNode() == lcl);
JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum);
return;
}
}
}
JITDUMP("making an outgoing copy for struct arg\n");
if (fgOutgoingArgTemps == nullptr)
{
fgOutgoingArgTemps = hashBv::Create(this);
}
unsigned tmp = 0;
bool found = false;
// Attempt to find a local we have already used for an outgoing struct and reuse it.
// We do not reuse within a statement.
if (!opts.MinOpts())
{
indexType lclNum;
FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps)
{
LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum);
if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) &&
!fgCurrentlyInUseArgTemps->testBit(lclNum))
{
tmp = (unsigned)lclNum;
found = true;
JITDUMP("reusing outgoing struct arg");
break;
}
}
NEXT_HBV_BIT_SET;
}
// Create the CopyBlk tree and insert it.
if (!found)
{
// Get a new temp
// Here we don't need an unsafe value cls check, since the addr of this temp is used only in copyblk.
tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument"));
lvaSetStruct(tmp, copyBlkClass, false);
if (call->IsVarargs())
{
lvaSetStructUsedAsVarArg(tmp);
}
fgOutgoingArgTemps->setBit(tmp);
}
fgCurrentlyInUseArgTemps->setBit(tmp);
// TYP_SIMD structs should not be enregistered, since the ABI requires them to be
// allocated on the stack and their address to be passed.
if (lclVarIsSIMDType(tmp))
{
// TODO: check if we need this block here or other parts already deal with it.
lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg));
}
// Create a reference to the temp
GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
// Copy the valuetype to the temp
GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */);
copyBlk = fgMorphCopyBlock(copyBlk);
#if FEATURE_FIXED_OUT_ARGS
// Do the copy early, and evaluate the temp later (see EvalArgsToTemps)
// When on Unix create LCL_FLD for structs passed in more than one registers. See fgMakeTmpArgNode
GenTree* arg = copyBlk;
#else // FEATURE_FIXED_OUT_ARGS
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression.
argEntry->tmpNum = tmp;
GenTree* arg = fgMakeTmpArgNode(argEntry);
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
#endif // FEATURE_FIXED_OUT_ARGS
args->SetNode(arg);
call->fgArgInfo->EvalToTmp(argEntry, tmp, arg);
}
#ifdef TARGET_ARM
// See declaration for specification comment.
void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
unsigned firstArgRegNum,
regMaskTP* pArgSkippedRegMask)
{
assert(varDsc->lvPromoted);
// There's no way to do these calculations without breaking abstraction and assuming that
// integer register arguments are consecutive ints. They are on ARM.
// To start, figure out what register contains the last byte of the first argument.
LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
unsigned lastFldRegOfLastByte =
(firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
// Now we're keeping track of the register that the last field ended in; see what registers
// subsequent fields start in, and whether any are skipped.
// (We assume here the invariant that the fields are sorted in offset order.)
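// For example (illustrative), if the struct starts in r1 and one field ends in r1 while
// the next field's offset maps to r3, then r2 was skipped and is added to the mask.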
for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++)
{
unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset;
LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum);
unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields.
// This loop should enumerate the offsets of any registers skipped.
// Find what reg contains the last byte:
// And start at the first register after that. If that isn't the first reg of the current
// field, the registers in between were skipped.
for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset;
skippedRegOffsets++)
{
// If the register number would not be an arg reg, we're done.
if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG)
return;
*pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets));
}
lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
}
}
#endif // TARGET_ARM
/*****************************************************************************
*
* A little helper used to rearrange nested commutative operations. The
* effect is that nested associative, commutative operations are transformed
* into a 'left-deep' tree, i.e. into something like this:
*
* (((a op b) op c) op d) op...
*/
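// For example, "a + (b + (c + d))" is re-associated into "((a + b) + c) + d" (operand
// order is preserved); the transformation bails out for overflow checks, CSE candidates,
// address-mode trees, and GC-pointer typing constraints.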
#if REARRANGE_ADDS
void Compiler::fgMoveOpsLeft(GenTree* tree)
{
GenTree* op1;
GenTree* op2;
genTreeOps oper;
do
{
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
oper = tree->OperGet();
noway_assert(GenTree::OperIsCommutative(oper));
noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL);
noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder);
noway_assert(oper == op2->gtOper);
// Commutativity doesn't hold if overflow checks are needed
if (tree->gtOverflowEx() || op2->gtOverflowEx())
{
return;
}
if (gtIsActiveCSE_Candidate(op2))
{
// If we have marked op2 as a CSE candidate,
// we can't perform a commutative reordering
// because any value numbers that we computed for op2
// will be incorrect after performing a commutative reordering
//
return;
}
if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT))
{
return;
}
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators
if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0))
{
return;
}
if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN)
{
// We could deal with this, but we were always broken and just hit the assert
// below regarding flags, which means it's not frequent, so we will just bail out.
// See #195514
return;
}
noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx());
GenTree* ad1 = op2->AsOp()->gtOp1;
GenTree* ad2 = op2->AsOp()->gtOp2;
// Compiler::optOptimizeBools() can create GT_OR of two GC pointers yielding a GT_INT
// We cannot reorder such GT_OR trees
//
if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet()))
{
break;
}
// Don't split up a byref calculation and create a new byref. E.g.,
// [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int).
// Doing this transformation could create a situation where the first
// addition (that is, [byref]+ (ref, int) ) creates a byref pointer that
// no longer points within the ref object. If a GC happens, the byref won't
// get updated. This can happen, for instance, if one of the int components
// is negative. It also requires the address generation be in a fully-interruptible
// code region.
//
if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL)
{
assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD));
break;
}
/* Change "(x op (y op z))" to "(x op y) op z" */
/* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */
GenTree* new_op1 = op2;
new_op1->AsOp()->gtOp1 = op1;
new_op1->AsOp()->gtOp2 = ad1;
/* Change the flags. */
// Make sure we aren't throwing away any flags
noway_assert((new_op1->gtFlags &
~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated
GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0);
new_op1->gtFlags =
(new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag.
(op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT);
/* Retype new_op1 if it has now become (or is no longer) a GC ptr. */
if (varTypeIsGC(op1->TypeGet()))
{
noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_ADD) || // byref(ref + (int+int))
(varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_OR)); // int(gcref | int(gcref|intval))
new_op1->gtType = tree->gtType;
}
else if (varTypeIsGC(ad2->TypeGet()))
{
// Neither ad1 nor op1 is GC, so new_op1 isn't either
noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL);
new_op1->gtType = TYP_I_IMPL;
}
// If new_op1 is a new expression, assign it a new unique value number.
// vnStore is null before the ValueNumber phase has run
if (vnStore != nullptr)
{
// We can only keep the old value number on new_op1 if both op1 and ad2
// have the same non-NoVN value numbers. Since op is commutative, comparing
// only ad2 and op1 is enough.
if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal()))
{
new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet()));
}
}
tree->AsOp()->gtOp1 = new_op1;
tree->AsOp()->gtOp2 = ad2;
/* If 'new_op1' is now the same nested op, process it recursively */
if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
{
fgMoveOpsLeft(new_op1);
}
/* If 'ad2' is now the same nested op, process it
* Instead of recursion, we set up op1 and op2 for the next loop.
*/
op1 = new_op1;
op2 = ad2;
} while ((op2->gtOper == oper) && !op2->gtOverflowEx());
return;
}
#endif
/*****************************************************************************/
void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay)
{
if (tree->OperIs(GT_BOUNDS_CHECK))
{
GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk();
BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay);
if (failBlock != nullptr)
{
boundsChk->gtIndRngFailBB = failBlock;
}
}
else if (tree->OperIs(GT_INDEX_ADDR))
{
GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr();
BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
if (failBlock != nullptr)
{
indexAddr->gtIndRngFailBB = failBlock;
}
}
else
{
noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX));
fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
}
}
BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay)
{
if (opts.MinOpts())
{
delay = false;
}
if (!opts.compDbgCode)
{
if (!delay && !compIsForInlining())
{
// Create/find the appropriate "range-fail" label
return fgRngChkTarget(compCurBB, kind);
}
}
return nullptr;
}
/*****************************************************************************
*
* Expand a GT_INDEX node and fully morph the child operands
*
* The original GT_INDEX node is bashed into the GT_IND node that accesses
* the array element. We expand the GT_INDEX node into a larger tree that
* evaluates the array base and index. The simplest expansion is a GT_COMMA
* with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag.
* For complex array or index expressions one or more GT_COMMA assignments
* are inserted so that we only evaluate the array or index expressions once.
*
* The fully expanded tree is then morphed. This causes gtFoldExpr to
* perform local constant prop and reorder the constants in the tree and
* fold them.
*
* We then parse the resulting array element expression in order to locate
* and label the constants and variables that occur in the tree.
*/
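// For example (illustrative), with range checks enabled and optimizations on, "a[i]"
// expands to roughly:
// COMMA(BOUNDS_CHECK(i, ARR_LENGTH(a)), IND(ADD(ADD(a, elemOffs), MUL(i, elemSize))))
// with temps introduced for 'a' and/or 'i' when those expressions are complex or
// side-effecting.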
const int MAX_ARR_COMPLEXITY = 4;
const int MAX_INDEX_COMPLEXITY = 4;
GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
{
noway_assert(tree->gtOper == GT_INDEX);
GenTreeIndex* asIndex = tree->AsIndex();
var_types elemTyp = asIndex->TypeGet();
unsigned elemSize = asIndex->gtIndElemSize;
CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass;
noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr);
// Fold "cns_str"[cns_index] to ushort constant
// NOTE: don't do it for empty string, the operation will fail anyway
if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) &&
!asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32())
{
const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue());
if (cnsIndex >= 0)
{
int length;
const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd,
asIndex->Arr()->AsStrCon()->gtSconCPX, &length);
if ((cnsIndex < length) && (str != nullptr))
{
GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT);
INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return cnsCharNode;
}
}
}
#ifdef FEATURE_SIMD
if (varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize))
{
// If this is a SIMD type, this is the point at which we lose the type information,
// so we need to set the correct type on the GT_IND.
// (We don't care about the base type here, so we only check, but don't retain, the return value).
unsigned simdElemSize = 0;
if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF)
{
assert(simdElemSize == elemSize);
elemTyp = getSIMDTypeForSize(elemSize);
// This is the new type of the node.
tree->gtType = elemTyp;
// Now set elemStructType to null so that we don't confuse value numbering.
elemStructType = nullptr;
}
}
#endif // FEATURE_SIMD
// Set up the array length's offset into lenOffs
// And the first element's offset into elemOffs
ssize_t lenOffs;
ssize_t elemOffs;
if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
{
lenOffs = OFFSETOF__CORINFO_String__stringLen;
elemOffs = OFFSETOF__CORINFO_String__chars;
tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
}
else
{
// We have a standard array
lenOffs = OFFSETOF__CORINFO_Array__length;
elemOffs = OFFSETOF__CORINFO_Array__data;
}
// In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts
// compilation time is roughly proportional to the size of the IR, this helps keep compilation times down.
// Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion
// performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in
// minopts).
//
// When we *are* optimizing, we fully expand GT_INDEX to:
// 1. Evaluate the array address expression and store the result in a temp if the expression is complex or
// side-effecting.
// 2. Evaluate the array index expression and store the result in a temp if the expression is complex or
// side-effecting.
// 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array))
// 4. Compute the address of the element that will be accessed:
// GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize))
// 5. Dereference the address with a GT_IND.
//
// This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows
// for more straightforward bounds-check removal, CSE, etc.
if (opts.MinOpts())
{
GenTree* const array = fgMorphTree(asIndex->Arr());
GenTree* const index = fgMorphTree(asIndex->Index());
GenTreeIndexAddr* const indexAddr =
new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize,
static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs));
indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT;
// Mark the indirection node as needing a range check if necessary.
// Note this will always be true unless JitSkipArrayBoundCheck() is used
if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0)
{
fgSetRngChkTarget(indexAddr);
}
if (!tree->TypeIs(TYP_STRUCT))
{
tree->ChangeOper(GT_IND);
}
else
{
DEBUG_DESTROY_NODE(tree);
tree = gtNewObjNode(elemStructType, indexAddr);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
}
GenTreeIndir* const indir = tree->AsIndir();
indir->Addr() = indexAddr;
bool canCSE = indir->CanCSE();
indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT);
if (!canCSE)
{
indir->SetDoNotCSE();
}
INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return indir;
}
GenTree* arrRef = asIndex->Arr();
GenTree* index = asIndex->Index();
bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING
bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0);
GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression
GenTree* bndsChk = nullptr;
// If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address.
if (chkd)
{
GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression
GenTree* index2 = nullptr;
// If the arrRef or index expressions involves an assignment, a call, or reads from global memory,
// then we *must* allocate a temporary in which to "localize" those values, to ensure that the
// same values are used in the bounds check and the actual dereference.
// Also we allocate the temporary when the expression is sufficiently complex/expensive.
//
// Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is
// not exposed. Without that condition there are cases of local struct fields that were previously,
// needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that
// were mostly ameliorated by adding this condition.
//
// Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created
// after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is
// perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to
// do this here.
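// For example (illustrative only): for "GetArray()[GetIndex()]" both the array and the index expressions
// are spilled to temps so that the bounds check and the element address computation see the same values.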
if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) ||
gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef);
arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
}
else
{
arrRef2 = gtCloneExpr(arrRef);
noway_assert(arrRef2 != nullptr);
}
if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) ||
index->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr"));
indexDefn = gtNewTempAssign(indexTmpNum, index);
index = gtNewLclvNode(indexTmpNum, index->TypeGet());
index2 = gtNewLclvNode(indexTmpNum, index->TypeGet());
}
else
{
index2 = gtCloneExpr(index);
noway_assert(index2 != nullptr);
}
// Next introduce a GT_BOUNDS_CHECK node
var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
// of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
// the comparison will have to be widened to 64 bits.
if (index->TypeGet() == TYP_I_IMPL)
{
bndsChkType = TYP_I_IMPL;
}
#endif // TARGET_64BIT
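// For example (illustrative only): a TYP_LONG index on a 64-bit target makes the comparison 64-bit, so
// the TYP_INT array length is cast up to TYP_I_IMPL below.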
GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);
if (bndsChkType != TYP_INT)
{
arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
}
GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);
bndsChk = arrBndsChk;
// Now we'll switch to using the second copies for arrRef and index
// to compute the address expression
arrRef = arrRef2;
index = index2;
}
// Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
GenTree* addr;
#ifdef TARGET_64BIT
// Widen 'index' on 64-bit targets
if (index->TypeGet() != TYP_I_IMPL)
{
if (index->OperGet() == GT_CNS_INT)
{
index->gtType = TYP_I_IMPL;
}
else
{
index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
/* Scale the index value if necessary */
if (elemSize > 1)
{
GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);
// Fix 392756 WP7 Crossgen
//
// During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
// is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
// Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
//
size->gtFlags |= GTF_DONT_CSE;
/* Multiply by the array element size */
addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
}
else
{
addr = index;
}
// Be careful to only create the byref pointer when the full index expression is added to the array reference.
// We don't want to create a partial byref address expression that doesn't include the full index offset:
// a byref must point within the containing object. It is dangerous (especially when optimizations come into
// play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
// the partial byref will not point within the object, and thus not get updated correctly during a GC.
// This is mostly a risk in fully-interruptible code regions.
// We can generate two types of trees for "addr":
//
// 1) "arrRef + (index + elemOffset)"
// 2) "(arrRef + elemOffset) + index"
//
// XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
// while for Arm it is better to make an invariant sub-tree as large as possible, which is usually
// "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
// 2) should still be safe from GC's point of view since both ADD operations are byref and point to
// within the object so GC will be able to correctly track and update them.
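// For example (illustrative, assuming a 64-bit target where the first int[] element is at offset 16):
// 1) ADD(byref, arrRef, ADD(MUL(index, 4), 16)) -- folds into an [arrRef + index*4 + 16] addressing mode
// 2) ADD(byref, ADD(byref, arrRef, 16), MUL(index, 4)) -- "arrRef + 16" can be hoisted/CSE'd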
bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
groupArrayRefWithElemOffset = true;
// TODO: in some cases even on ARM it is better to use the 1) shape, because if "index" is invariant and
// "arrRef" is not, we will at least be able to hoist/CSE "index + elemOffset" in some cases.
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497
// Use the 2) form only for primitive types for now - it significantly reduced the number of size regressions
if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
{
groupArrayRefWithElemOffset = false;
}
#endif
// First element's offset
GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL);
if (groupArrayRefWithElemOffset)
{
GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr);
}
else
{
addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
}
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) ||
(GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL));
// Change the original GT_INDEX node into a GT_IND node
tree->SetOper(GT_IND);
// If the index node is a floating-point type, notify the compiler
// we'll potentially use floating point registers at the time of codegen.
if (varTypeUsesFloatReg(tree->gtType))
{
this->compFloatingPointUsed = true;
}
// We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node
// is no longer a GT_INDEX node.
tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT);
tree->AsOp()->gtOp1 = addr;
// This is an array index expression.
tree->gtFlags |= GTF_IND_ARR_INDEX;
// If there's a bounds check, the indir won't fault.
if (bndsChk || indexNonFaulting)
{
tree->gtFlags |= GTF_IND_NONFAULTING;
}
else
{
tree->gtFlags |= GTF_EXCEPT;
}
if (nCSE)
{
tree->gtFlags |= GTF_DONT_CSE;
}
// Store information about it.
GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType));
// Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it.
GenTree* indTree = tree;
// Did we create a bndsChk tree?
if (bndsChk)
{
// Use a GT_COMMA node to prepend the array bound check
//
tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree);
/* Mark the indirection node as needing a range check */
fgSetRngChkTarget(bndsChk);
}
if (indexDefn != nullptr)
{
// Use a GT_COMMA node to prepend the index assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree);
}
if (arrRefDefn != nullptr)
{
// Use a GT_COMMA node to prepend the arrRef assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
}
JITDUMP("fgMorphArrayIndex (before remorph):\n")
DISPTREE(tree)
// Currently we morph the tree to perform some folding operations prior
// to attaching fieldSeq info and labeling constant array index contributions
//
tree = fgMorphTree(tree);
JITDUMP("fgMorphArrayIndex (after remorph):\n")
DISPTREE(tree)
// Ideally we just want to proceed to attaching fieldSeq info and labeling the
// constant array index contributions, but the morphing operation may have changed
// the 'tree' into something that now unconditionally throws an exception.
//
// In such a case the gtEffectiveVal could be a new tree, or its gtOper could be modified,
// or it could be left unchanged. If it is unchanged then we should not return;
// instead we should proceed to attaching fieldSeq info, etc...
//
GenTree* arrElem = tree->gtEffectiveVal();
if (fgIsCommaThrow(tree))
{
if ((arrElem != indTree) || // A new tree node may have been created
(!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT
{
return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
}
}
assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED));
DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED)
addr = arrElem->gtGetOp1();
GenTree* cnsOff = nullptr;
if (addr->OperIs(GT_ADD))
{
GenTree* addrOp1 = addr->gtGetOp1();
if (groupArrayRefWithElemOffset)
{
if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI())
{
assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF));
cnsOff = addrOp1->gtGetOp2();
addr = addr->gtGetOp2();
// Label any constant array index contributions with #ConstantIndex and any LclVars with
// GTF_VAR_ARR_INDEX
addr->LabelIndex(this);
}
else
{
assert(addr->gtGetOp2()->IsCnsIntOrI());
cnsOff = addr->gtGetOp2();
addr = nullptr;
}
}
else
{
assert(addr->TypeIs(TYP_BYREF));
assert(addr->gtGetOp1()->TypeIs(TYP_REF));
addr = addr->gtGetOp2();
// Look for the constant [#FirstElem] node here, or as the RHS of an ADD.
if (addr->IsCnsIntOrI())
{
cnsOff = addr;
addr = nullptr;
}
else
{
if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI())
{
cnsOff = addr->gtGetOp2();
addr = addr->gtGetOp1();
}
// Label any constant array index contributions with #ConstantIndex and any LclVars with
// GTF_VAR_ARR_INDEX
addr->LabelIndex(this);
}
}
}
else if (addr->IsCnsIntOrI())
{
cnsOff = addr;
}
FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs))
{
// Assign it the [#FirstElem] field sequence
//
cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq;
}
else // We have folded the first element's offset with the index expression
{
// Build the [#ConstantIndex, #FirstElem] field sequence
//
FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq);
if (cnsOff == nullptr) // It must have folded into a zero offset
{
// Record in the general zero-offset map.
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
else
{
cnsOff->AsIntCon()->gtFieldSeq = fieldSeq;
}
}
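// For example (illustrative only): with a constant index the offset typically folds to a single constant
// "elemOffs + cnsIndex * elemSize", and that constant carries the [#ConstantIndex, #FirstElem] sequence.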
return tree;
}
#ifdef TARGET_X86
/*****************************************************************************
*
* Wrap fixed stack arguments for varargs functions to go through varargs
* cookie to access them, except for the cookie itself.
*
* Non-x86 platforms are allowed to access all arguments directly
* so we don't need this code.
*
*/
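// As a sketch (illustrative only): a fixed stack argument is rewritten from a direct local access into
// IND(varType, SUB(lvaVarargsBaseOfStkArgs, <the arg's offset relative to that base>)), or into an
// OBJ node for struct-typed arguments.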
GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
{
/* For the fixed stack arguments of a varargs function, we need to go
through the varargs cookies to access them, except for the
cookie itself */
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg)
{
// Create a node representing the local pointing to the base of the args
GenTree* ptrArg =
gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
gtNewIconNode(varDsc->GetStackOffset() -
codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs));
// Access the argument through the local
GenTree* tree;
if (varTypeIsStruct(varType))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
tree = gtNewObjNode(typeHnd, ptrArg);
}
else
{
tree = gtNewOperNode(GT_IND, varType, ptrArg);
}
tree->gtFlags |= GTF_IND_TGTANYWHERE;
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
return fgMorphTree(tree);
}
return NULL;
}
#endif
/*****************************************************************************
*
* Transform the given GT_LCL_VAR tree for code generation.
*/
GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph)
{
assert(tree->gtOper == GT_LCL_VAR);
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
var_types varType = lvaGetRealType(lclNum);
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0);
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
/* If not during the global morphing phase bail */
if (!fgGlobalMorph && !forceRemorph)
{
return tree;
}
bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0;
noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr
if (!varAddr && varDsc->lvNormalizeOnLoad())
{
// TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL.
// Now it does, but this leads to some regressions because we lose the uniform VNs for trees
// that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created
// here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)).
// This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer.
// This quirk preserves the previous behavior.
// TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk.
bool isBoolQuirk = varType == TYP_BOOL;
// Assertion prop can tell us to omit adding a cast here. This is
// useful when the local is a small-typed parameter that is passed in a
// register: in that case, the ABI specifies that the upper bits might
// be invalid, but the assertion guarantees us that we have normalized
// when we wrote it.
if (optLocalAssertionProp && !isBoolQuirk &&
optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX)
{
// The previous assertion can guarantee us that if this node gets
// assigned a register, it will be normalized already. It is still
// possible that this node ends up being in memory, in which case
// normalization will still be needed, so we better have the right
// type.
assert(tree->TypeGet() == varDsc->TypeGet());
return tree;
}
// Small-typed arguments and aliased locals are normalized on load.
// Other small-typed locals are normalized on store.
// Also, under the debugger as the debugger could write to the variable.
// If this is one of the former, insert a narrowing cast on the load.
// ie. Convert: var-short --> cast-short(var-int)
tree->gtType = TYP_INT;
fgMorphTreeDone(tree);
tree = gtNewCastNode(TYP_INT, tree, false, varType);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
/*****************************************************************************
Grab a temp for big offset morphing.
This method will grab a new temp if no temp of this "type" has been created,
or return the same cached one if it has.
*/
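// For example: all TYP_BYREF "big offset" temps created during morph share one local, cached in
// fgBigOffsetMorphingTemps[TYP_BYREF].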
unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
{
unsigned lclNum = fgBigOffsetMorphingTemps[type];
if (lclNum == BAD_VAR_NUM)
{
// We haven't created a temp for this kind of type. Create one now.
lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
fgBigOffsetMorphingTemps[type] = lclNum;
}
else
{
// We better get the right type.
noway_assert(lvaTable[lclNum].TypeGet() == type);
}
noway_assert(lclNum != BAD_VAR_NUM);
return lclNum;
}
/*****************************************************************************
*
* Transform the given GT_FIELD tree for code generation.
*/
GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac)
{
assert(tree->gtOper == GT_FIELD);
CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd;
unsigned fldOffset = tree->AsField()->gtFldOffset;
GenTree* objRef = tree->AsField()->GetFldObj();
bool objIsLocal = false;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField();
if (!tree->AsField()->gtFldMayOverlap)
{
if (objRef != nullptr)
{
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::Instance);
}
else
{
// Only simple statics get imported as GT_FIELDs.
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::SimpleStatic);
}
}
// Reset the flag because we may reuse the node.
tree->AsField()->gtFldMayOverlap = false;
if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR))
{
// Make sure we've checked if 'objRef' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs may change it to a different opcode, which the
// simd field rewrites are sensitive to.
fgMorphImplicitByRefArgs(objRef);
}
noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) ||
((tree->gtFlags & GTF_GLOB_REF) != 0));
#ifdef FEATURE_SIMD
// if this field belongs to simd struct, translate it to simd intrinsic.
if (mac == nullptr)
{
if (IsBaselineSimdIsaSupported())
{
GenTree* newTree = fgMorphFieldToSimdGetElement(tree);
if (newTree != tree)
{
newTree = fgMorphTree(newTree);
return newTree;
}
}
}
else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1()))
{
GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr();
if (lcl != nullptr)
{
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
#endif
// Create a default MorphAddrContext early so it doesn't go out of scope
// before it is used.
MorphAddrContext defMAC(MACK_Ind);
/* Is this an instance data member? */
if (objRef)
{
GenTree* addr;
objIsLocal = objRef->IsLocal();
if (tree->gtFlags & GTF_IND_TLS_REF)
{
NO_WAY("instance field can not be a TLS ref.");
}
/* We'll create the expression "*(objRef + mem_offs)" */
noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL);
/*
Now we have a tree like this:
+--------------------+
| GT_FIELD | tree
+----------+---------+
|
+--------------+-------------+
|tree->AsField()->GetFldObj()|
+--------------+-------------+
We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+---------+----------+
|
|
+---------+----------+
| GT_ADD | addr
+---------+----------+
|
/ \
/ \
/ \
+-------------------+ +----------------------+
| objRef | | fldOffset |
| | | (when fldOffset !=0) |
+-------------------+ +----------------------+
or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+----------+---------+
|
+----------+---------+
| GT_COMMA | comma2
+----------+---------+
|
/ \
/ \
/ \
/ \
+---------+----------+ +---------+----------+
comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr
+---------+----------+ +---------+----------+
| |
/ \ / \
/ \ / \
/ \ / \
+-----+-----+ +-----+-----+ +---------+ +-----------+
asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset |
+-----+-----+ +-----+-----+ +---------+ +-----------+
| |
/ \ |
/ \ |
/ \ |
+-----+-----+ +-----+-----+ +-----------+
| tmpLcl | | objRef | | tmpLcl |
+-----------+ +-----------+ +-----------+
*/
var_types objRefType = objRef->TypeGet();
GenTree* comma = nullptr;
// NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field,
// and thus is equivalent to a MACK_Ind with zero offset.
if (mac == nullptr)
{
mac = &defMAC;
}
// This flag is set to enable the "conservative" style of explicit null-check insertion.
// This means that we insert an explicit null check whenever we create byref by adding a
// constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately
// dereferenced). The alternative is "aggressive", which would not insert such checks (for
// small offsets); in this plan, we would transfer some null-checking responsibility to
// callees of methods taking byref parameters. They would have to add explicit null checks
// when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in
// contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too
// large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null
// checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs.
// This is left here to point out how to implement it.
CLANG_FORMAT_COMMENT_ANCHOR;
#define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1
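// For example (illustrative only): taking "&obj.someField" where the total field offset exceeds
// MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT typically stores "obj" to a temp (unless it is already a local),
// inserts an explicit GT_NULLCHECK of that local, and computes the address from it -- the "comma2"
// shape in the diagram above.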
bool addExplicitNullCheck = false;
// Implicit byref locals and string literals are never null.
if (fgAddrCouldBeNull(objRef))
{
// If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression
// whose address is being taken is either a local or static variable, whose address is necessarily
// non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind))
{
if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset))
{
addExplicitNullCheck = true;
}
else
{
// In R2R mode the field offset for some fields may change when the code
// is loaded. So we can't rely on a zero offset here to suppress the null check.
//
// See GitHub issue #16454.
bool fieldHasChangeableOffset = false;
#ifdef FEATURE_READYTORUN
fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr);
#endif
#if CONSERVATIVE_NULL_CHECK_BYREF_CREATION
addExplicitNullCheck = (mac->m_kind == MACK_Addr) &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset);
#else
addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset));
#endif
}
}
}
if (addExplicitNullCheck)
{
#ifdef DEBUG
if (verbose)
{
printf("Before explicit null check morphing:\n");
gtDispTree(tree);
}
#endif
//
// Create the "comma" subtree
//
GenTree* asg = nullptr;
GenTree* nullchk;
unsigned lclNum;
if (objRef->gtOper != GT_LCL_VAR)
{
lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet()));
// Create the "asg" node
asg = gtNewTempAssign(lclNum, objRef);
}
else
{
lclNum = objRef->AsLclVarCommon()->GetLclNum();
}
GenTree* lclVar = gtNewLclvNode(lclNum, objRefType);
nullchk = gtNewNullCheck(lclVar, compCurBB);
nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
if (asg)
{
// Create the "comma" node.
comma = gtNewOperNode(GT_COMMA,
TYP_VOID, // We don't want to return anything from this "comma" node.
// Set the type to TYP_VOID, so we can select "cmp" instruction
// instead of "mov" instruction later on.
asg, nullchk);
}
else
{
comma = nullchk;
}
addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
}
else
{
addr = objRef;
}
#ifdef FEATURE_READYTORUN
if (tree->AsField()->gtFieldLookup.addr != nullptr)
{
GenTree* offsetNode = nullptr;
if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE)
{
offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr,
GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd;
#endif
}
else
{
noway_assert(!"unexpected accessType for R2R field access");
}
var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF;
addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode);
}
#endif
if (fldOffset != 0)
{
// Generate the "addr" node.
// Add the member offset to the object's address.
addr = gtNewOperNode(GT_ADD, (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF, addr,
gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
}
// Now let's set the "tree" as a GT_IND tree.
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
tree->SetIndirExceptionFlags(this);
if (addExplicitNullCheck)
{
//
// Create "comma2" node and link it to "tree".
//
GenTree* comma2;
comma2 = gtNewOperNode(GT_COMMA,
addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
comma, addr);
tree->AsOp()->gtOp1 = comma2;
}
#ifdef DEBUG
if (verbose)
{
if (addExplicitNullCheck)
{
printf("After adding explicit null check:\n");
gtDispTree(tree);
}
}
#endif
}
else /* This is a static data member */
{
if (tree->gtFlags & GTF_IND_TLS_REF)
{
// Thread Local Storage static field reference
//
// Field ref is a TLS 'Thread-Local-Storage' reference
//
// Build this tree: IND(*) #
// |
// ADD(I_IMPL)
// / \.
// / CNS(fldOffset)
// /
// /
// /
// IND(I_IMPL) == [Base of this DLL's TLS]
// |
// ADD(I_IMPL)
// / \.
// / CNS(IdValue*4) or MUL
// / / \.
// IND(I_IMPL) / CNS(4)
// | /
// CNS(TLS_HDL,0x2C) IND
// |
// CNS(pIdAddr)
//
// # Denotes the original node
//
void** pIdAddr = nullptr;
unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);
//
// If we can access the TLS DLL index ID value directly
// then pIdAddr will be NULL and
// IdValue will be the actual TLS DLL index ID
//
GenTree* dllRef = nullptr;
if (pIdAddr == nullptr)
{
if (IdValue != 0)
{
dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
}
}
else
{
dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true);
// Next we multiply by 4
dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
}
#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides
// Mark this ICON as a TLS_HDL, codegen will use FS:[cns]
GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS
if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
tlsRef->gtFlags |= GTF_ICON_INITCLASS;
}
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (dllRef != nullptr)
{
/* Add the dllRef */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
}
/* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (fldOffset != 0)
{
GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
/* Add the TLS static field offset to the address */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
}
// Final indirect to get to actual value of TLS static field
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = tlsRef;
noway_assert(tree->gtFlags & GTF_IND_TLS_REF);
}
else
{
// Normal static field reference
//
// If we can access the static's address directly
// then pFldAddr will be NULL and
// fldAddr will be the actual address of the static field
//
void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);
// We should always be able to access this static field address directly
//
assert(pFldAddr == nullptr);
// For boxed statics, this direct address will be for the box. We have already added
// the indirection for the field itself and attached the sequence, in importation.
bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd);
if (isBoxedStatic)
{
fieldSeq = FieldSeqStore::NotAField();
}
// TODO-CQ: enable this optimization for 32 bit targets.
bool isStaticReadOnlyInited = false;
#ifdef TARGET_64BIT
if (tree->TypeIs(TYP_REF) && !isBoxedStatic)
{
bool pIsSpeculative = true;
if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE)
{
isStaticReadOnlyInited = !pIsSpeculative;
}
}
#endif // TARGET_64BIT
// TODO: choices made below have mostly historical reasons and
// should be unified to always use the IND(<address>) form.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
bool preferIndir = true;
#else // !TARGET_64BIT
bool preferIndir = isBoxedStatic;
#endif // !TARGET_64BIT
if (preferIndir)
{
GenTreeFlags handleKind = GTF_EMPTY;
if (isBoxedStatic)
{
handleKind = GTF_ICON_STATIC_BOX_PTR;
}
else if (isStaticReadOnlyInited)
{
handleKind = GTF_ICON_CONST_PTR;
}
else
{
handleKind = GTF_ICON_STATIC_HDL;
}
GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fieldSeq);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to.
if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
addr->gtFlags |= GTF_ICON_INITCLASS;
}
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
if (isBoxedStatic)
{
// The box for the static cannot be null, and is logically invariant, since it
// represents (a base for) the static's address.
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
else if (isStaticReadOnlyInited)
{
JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd));
// Static readonly field is not null at this point (see getStaticFieldCurrentClass impl).
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
return fgMorphSmpOp(tree);
}
else
{
// Only volatile or classinit could be set, and they map over
noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0);
static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE);
static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS);
tree->SetOper(GT_CLS_VAR);
tree->AsClsVar()->gtClsVarHnd = symHnd;
tree->AsClsVar()->gtFieldSeq = fieldSeq;
}
return tree;
}
}
noway_assert(tree->gtOper == GT_IND);
if (fldOffset == 0)
{
GenTree* addr = tree->AsOp()->gtOp1;
// 'addr' may be a GT_COMMA. Skip over any comma nodes
addr = addr->gtEffectiveVal();
#ifdef DEBUG
if (verbose)
{
printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n");
gtDispTree(tree);
}
#endif
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
// Pass down the current mac; if non null we are computing an address
GenTree* result = fgMorphSmpOp(tree, mac);
#ifdef DEBUG
if (verbose)
{
printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
gtDispTree(result);
}
#endif
return result;
}
//------------------------------------------------------------------------------
// fgMorphCallInline: attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// inlineResult - result tracking and reporting
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
{
bool inliningFailed = false;
// Is this call an inline candidate?
if (call->IsInlineCandidate())
{
InlineContext* createdContext = nullptr;
// Attempt the inline
fgMorphCallInlineHelper(call, inlineResult, &createdContext);
// We should have made up our minds one way or another....
assert(inlineResult->IsDecided());
// If we failed to inline, we have a bit of work to do to cleanup
if (inlineResult->IsFailure())
{
if (createdContext != nullptr)
{
// We created a context before we got to the failure, so mark
// it as failed in the tree.
createdContext->SetFailed(inlineResult);
}
else
{
#ifdef DEBUG
// In debug we always put all inline attempts into the inline tree.
InlineContext* ctx =
m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call);
ctx->SetFailed(inlineResult);
#endif
}
inliningFailed = true;
// Clear the Inline Candidate flag so we can ensure later we tried
// inlining all candidates.
//
call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE;
}
}
else
{
// This wasn't an inline candidate. So it must be a GDV candidate.
assert(call->IsGuardedDevirtualizationCandidate());
// We already know we can't inline this call, so don't even bother to try.
inliningFailed = true;
}
// If we failed to inline (or didn't even try), do some cleanup.
if (inliningFailed)
{
if (call->gtReturnType != TYP_VOID)
{
JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID());
// Detach the GT_CALL tree from the original statement by
// hanging a "nothing" node to it. Later the "nothing" node will be removed
// and the original GT_CALL tree will be picked up by the GT_RET_EXPR node.
noway_assert(fgMorphStmt->GetRootNode() == call);
fgMorphStmt->SetRootNode(gtNewNothingNode());
}
}
}
//------------------------------------------------------------------------------
// fgMorphCallInlineHelper: Helper to attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// result - result to set to success or failure
// createdContext - The context that was created if the inline attempt got to the inliner.
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
//
// If a context was created because we got to the importer then it is output by this function.
// If the inline succeeded, this context will already be marked as successful. If it failed and
// a context is returned, then it will not have been marked as success or failed.
void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext)
{
// Don't expect any surprises here.
assert(result->IsCandidate());
if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING)
{
// For now, attributing this to call site, though it's really
// more of a budget issue (lvaCount currently includes all
// caller and prospective callee locals). We still might be
// able to inline other callees into this caller, or inline
// this callee in other callers.
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
if (call->IsVirtual())
{
result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL);
return;
}
// Re-check this because guarded devirtualization may allow these through.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
// impMarkInlineCandidate() is expected not to mark tail prefixed calls
// and recursive tail calls as inline candidates.
noway_assert(!call->IsTailPrefixedCall());
noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call));
//
// Calling inlinee's compiler to inline the method.
//
unsigned startVars = lvaCount;
#ifdef DEBUG
if (verbose)
{
printf("Expanding INLINE_CANDIDATE in statement ");
printStmtID(fgMorphStmt);
printf(" in " FMT_BB ":\n", compCurBB->bbNum);
gtDispStmt(fgMorphStmt);
if (call->IsImplicitTailCall())
{
printf("Note: candidate is implicit tail call\n");
}
}
#endif
impInlineRoot()->m_inlineStrategy->NoteAttempt(result);
//
// Invoke the compiler to inline the call.
//
fgInvokeInlineeCompiler(call, result, createdContext);
if (result->IsFailure())
{
// Undo some changes made in anticipation of inlining...
// Zero out the used locals
memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
for (unsigned i = startVars; i < lvaCount; i++)
{
new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
}
lvaCount = startVars;
#ifdef DEBUG
if (verbose)
{
// printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
}
#endif
return;
}
#ifdef DEBUG
if (verbose)
{
// printf("After inlining lvaCount=%d.\n", lvaCount);
}
#endif
}
//------------------------------------------------------------------------
// fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp.
//
// Arguments:
// callee - The callee to check
// failReason - If this method returns false, the reason why. Can be nullptr.
//
// Return Value:
// Returns true or false based on whether the callee can be fastTailCalled
//
// Notes:
// This function is target specific and each target will make the fastTailCall
// decision differently. See the notes below.
//
// This function calls fgInitArgInfo() to initialize the arg info table, which
// is used to analyze the argument. This function can alter the call arguments
// by adding argument IR nodes for non-standard arguments.
//
// Windows Amd64:
// A fast tail call can be made whenever the number of callee arguments
// is less than or equal to the number of caller arguments, or we have four
// or fewer callee arguments. This is because, on Windows AMD64, each
// argument uses exactly one register or one 8-byte stack slot. Thus, we only
// need to count arguments, and not be concerned with the size of each
// incoming or outgoing argument.
//
// Can fast tail call examples (amd64 Windows):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal or less than the caller --
// caller(struct, struct, struct, struct, struct, struct)
// callee(int, int, int, int, int, int)
//
// -- Callee requires stack space that is less than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int)
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Windows):
//
// -- Callee requires stack space that is larger than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int, double, double, double)
//
// -- Callee has a byref struct argument --
// caller(int, int, int)
// callee(struct(size 3 bytes))
//
// Unix Amd64 && Arm64:
// A fastTailCall decision can be made whenever the callee's stack space is
// less than or equal to the caller's stack space. There are many permutations
// of when the caller and callee have different stack sizes if there are
// structs being passed to either the caller or callee.
//
// Exceptions:
// If the callee has a 9 to 16 byte struct argument and the callee has
// stack arguments, the decision will be to not fast tail call. This is
// because, before fgMorphArgs is done, it is not known whether the struct
// will be placed on the stack or enregistered. Therefore, the conservative
// decision not to fast tail call is taken. This limitation should be
// removed if/when fgMorphArgs no longer depends on fgCanFastTailCall.
//
// Can fast tail call examples (amd64 Unix):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal to the caller --
// caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte
// stack
// space
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee requires stack space that is less than the caller --
// caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte
// stack
// space
// callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Unix):
//
// -- Callee requires stack space that is larger than the caller --
// caller(float, float, float, float, float, float, float, float) -- 8 float register arguments
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee has structs which cannot be enregistered (Implementation Limitation) --
// caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register
// arguments, 24 byte stack space
// callee({ double, double, double }) -- 24 bytes stack space
//
// -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) --
// caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space
// callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space
//
// -- Caller requires stack space and nCalleeArgs > nCallerArgs (Bug) --
// caller({ double, double, double, double, double, double }) // 48 byte stack
// callee(int, int) -- 2 int registers
bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
{
#if FEATURE_FASTTAILCALL
// To reach here means that the return types of the caller and callee are tail call compatible.
// In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
{
var_types retType = info.compRetType;
assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv,
(var_types)callee->gtReturnType, callee->gtRetClsHnd,
callee->GetUnmanagedCallConv()));
}
#endif
assert(!callee->AreArgsComplete());
fgInitArgInfo(callee);
fgArgInfo* argInfo = callee->fgArgInfo;
unsigned calleeArgStackSize = 0;
unsigned callerArgStackSize = info.compArgStackSize;
auto reportFastTailCallDecision = [&](const char* thisFailReason) {
if (failReason != nullptr)
{
*failReason = thisFailReason;
}
#ifdef DEBUG
if ((JitConfig.JitReportFastTailCallDecisions()) == 1)
{
if (callee->gtCallType != CT_INDIRECT)
{
const char* methodName;
methodName = eeGetMethodFullName(callee->gtCallMethHnd);
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ",
info.compFullName, methodName);
}
else
{
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- "
"Decision: ",
info.compFullName);
}
if (thisFailReason == nullptr)
{
printf("Will fast tailcall");
}
else
{
printf("Will not fast tailcall (%s)", thisFailReason);
}
printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize);
}
else
{
if (thisFailReason == nullptr)
{
JITDUMP("[Fast tailcall decision]: Will fast tailcall\n");
}
else
{
JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason);
}
}
#endif // DEBUG
};
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment());
calleeArgStackSize += arg->GetStackByteSize();
#ifdef TARGET_ARM
if (arg->IsSplit())
{
reportFastTailCallDecision("Splitted argument in callee is not supported on ARM32");
return false;
}
#endif // TARGET_ARM
}
calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize);
#ifdef TARGET_ARM
if (compHasSplitParam)
{
reportFastTailCallDecision("Splitted argument in caller is not supported on ARM32");
return false;
}
if (compIsProfilerHookNeeded())
{
reportFastTailCallDecision("Profiler is not supported on ARM32");
return false;
}
// On ARM32 we have only one non-parameter volatile register and we need it
// for the GS security cookie check. We could technically still tailcall
// when the callee does not use all argument registers, but we keep the
// code simple here.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("Not enough registers available due to the GS security cookie check");
return false;
}
#endif
if (!opts.compFastTailCalls)
{
reportFastTailCallDecision("Configuration doesn't allow fast tail calls");
return false;
}
if (callee->IsStressTailCall())
{
reportFastTailCallDecision("Fast tail calls are not performed under tail call stress");
return false;
}
#ifdef TARGET_ARM
if (callee->IsR2RRelativeIndir() || callee->HasNonStandardAddedArgs(this))
{
reportFastTailCallDecision(
"Method with non-standard args passed in callee saved register cannot be tail called");
return false;
}
#endif
// Note on vararg methods:
// If the caller is a vararg method, we don't know the number of arguments passed by the caller's caller.
// But we can be sure that the incoming arg area of the vararg caller is sufficient to hold its
// fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as
// the outgoing arg area required for the callee is bounded by the caller's fixed argument space.
//
// Note that the callee being a vararg method is not a problem since we can account for the params being passed.
//
// We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg
// method. This is due to the ABI differences for native vararg methods for these platforms. There is
// work required to shuffle arguments to the correct locations.
CLANG_FORMAT_COMMENT_ANCHOR;
if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs()))
{
reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64");
return false;
}
if (compLocallocUsed)
{
reportFastTailCallDecision("Localloc used");
return false;
}
#ifdef TARGET_AMD64
// Needed for Jit64 compat.
// In future, enabling fast tail calls from methods that need GS cookie
// check would require codegen side work to emit GS cookie check before a
// tail call.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("GS Security cookie check required");
return false;
}
#endif
// If the NextCallReturnAddress intrinsic is used we should do normal calls.
if (info.compHasNextCallRetAddr)
{
reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic");
return false;
}
if (callee->HasRetBufArg()) // RetBuf
{
// If callee has RetBuf param, caller too must have it.
// Otherwise go the slow route.
if (info.compRetBuffArg == BAD_VAR_NUM)
{
reportFastTailCallDecision("Callee has RetBuf but caller does not.");
return false;
}
}
// For a fast tail call the caller will use its incoming arg stack space to place
// arguments, so if the callee requires more arg stack space than is available here
// the fast tail call cannot be performed. This is common to all platforms.
// Note that the GC'ness of on-stack args need not match since the arg setup area is marked
// as non-interruptible for fast tail calls.
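// For example (illustrative only): a caller with 16 bytes of incoming stack arg space can fast tail call
// a callee that needs 16 or fewer bytes of outgoing stack args, but not one that needs 24.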
if (calleeArgStackSize > callerArgStackSize)
{
reportFastTailCallDecision("Not enough incoming arg space");
return false;
}
// For Windows some struct parameters are copied on the local frame
// and then passed by reference. We cannot fast tail call in these situations
// as we need to keep our frame around.
if (fgCallHasMustCopyByrefParameter(callee))
{
reportFastTailCallDecision("Callee has a byref parameter");
return false;
}
reportFastTailCallDecision(nullptr);
return true;
#else // FEATURE_FASTTAILCALL
if (failReason)
*failReason = "Fast tailcalls are not supported on this platform";
return false;
#endif
}
//------------------------------------------------------------------------
// fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that
// requires a struct copy in the caller.
//
// Arguments:
// callee - The callee to check
//
// Return Value:
// Returns true or false based on whether this call has a byref parameter that
// requires a struct copy in the caller.
#if FEATURE_FASTTAILCALL
bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee)
{
fgArgInfo* argInfo = callee->fgArgInfo;
bool hasMustCopyByrefParameter = false;
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
if (arg->isStruct)
{
if (arg->passedByRef)
{
// Generally a byref arg will block tail calling, as we have to
// make a local copy of the struct for the callee.
hasMustCopyByrefParameter = true;
// If we're optimizing, we may be able to pass our caller's byref to our callee,
// and so still be able to avoid a struct copy.
if (opts.OptimizationEnabled())
{
// First, see if this arg is an implicit byref param.
GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
// Yes, the arg is an implicit byref param.
const unsigned lclNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lcl);
// The param must not be promoted; if we've promoted, then the arg will be
// a local struct assembled from the promoted fields.
if (varDsc->lvPromoted)
{
JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n",
dspTreeID(arg->GetNode()), lclNum);
}
else
{
JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n",
dspTreeID(arg->GetNode()), lclNum);
// We have to worry about introducing aliases if we bypass copying
// the struct at the call. We'll do some limited analysis to see if we
// can rule this out.
const unsigned argLimit = 6;
// If this is the only appearance of the byref in the method, then
// aliasing is not possible.
//
// If no other call arg refers to this byref, and no other arg is
// a pointer which could refer to this byref, we can optimize.
//
// We only check this for calls with small numbers of arguments,
// as the analysis cost will be quadratic.
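// For example (illustrative only): if the implicit byref struct parameter appears only as arguments to
// this call, and no other argument is a TYP_BYREF/TYP_I_IMPL value that could point into it, the
// caller's local copy can be skipped and the incoming byref passed straight through to the callee.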
//
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
if (totalAppearances == 1)
{
JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
else if (totalAppearances > callAppearances)
{
// lvRefCntWtd tracks the number of appearances of the arg at call sites.
// If this number doesn't match the regular ref count, there is
// a non-call appearance, and we must be conservative.
//
JITDUMP("... no, arg has %u non-call appearance(s)\n",
totalAppearances - callAppearances);
}
else if (argInfo->ArgCount() <= argLimit)
{
JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n"
"... Running alias analysis on this call's args\n",
totalAppearances);
GenTree* interferingArg = nullptr;
for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2)
{
if (index2 == index)
{
continue;
}
fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false);
JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode()));
DISPTREE(arg2->GetNode());
// Do we pass 'lcl' more than once to the callee?
if (arg2->isStruct && arg2->passedByRef)
{
GenTreeLclVarCommon* const lcl2 =
arg2->GetNode()->IsImplicitByrefParameterValue(this);
if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum()))
{
// not copying would introduce aliased implicit byref structs
// in the callee ... we can't optimize.
interferingArg = arg2->GetNode();
break;
}
else
{
JITDUMP("... arg refers to different implicit byref V%02u\n",
lcl2->GetLclNum());
continue;
}
}
// Do we pass a byref pointer which might point within 'lcl'?
//
// We can assume the 'lcl' is unaliased on entry to the
// method, so the only way we can have an aliasing byref pointer at
// the call is if 'lcl' is address taken/exposed in the method.
//
// Note even though 'lcl' is not promoted, we are in the middle
// of the promote->rewrite->undo->(morph)->demote cycle, and so
// might see references to promoted fields of 'lcl' that haven't yet
// been demoted (see fgMarkDemotedImplicitByRefArgs).
//
// So, we also need to scan all 'lcl's fields, if any, to see if they
// are exposed.
//
// When looking for aliases from other args, we check for both TYP_BYREF
// and TYP_I_IMPL typed args here. Conceptually anything that points into
// an implicit byref parameter should be TYP_BYREF, as these parameters could
// refer to boxed heap locations (say if the method is invoked by reflection)
// but there are some stack only structs (like typed references) where
// the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will
// transiently retype all simple address-of implicit parameter args as
// TYP_I_IMPL.
//
if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL))
{
JITDUMP("...arg is a byref, must run an alias check\n");
bool checkExposure = true;
bool hasExposure = false;
// See if there is any way arg could refer to a parameter struct.
GenTree* arg2Node = arg2->GetNode();
if (arg2Node->OperIs(GT_LCL_VAR))
{
GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon();
assert(arg2LclNode->GetLclNum() != lclNum);
LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode);
// Other params can't alias implicit byref params
if (arg2Dsc->lvIsParam)
{
checkExposure = false;
}
}
// Because we're checking TYP_I_IMPL above, at least
// screen out obvious things that can't cause aliases.
else if (arg2Node->IsIntegralConst())
{
checkExposure = false;
}
if (checkExposure)
{
JITDUMP(
"... not sure where byref arg points, checking if V%02u is exposed\n",
lclNum);
// arg2 might alias arg, see if we've exposed
// arg somewhere in the method.
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
// Struct as a whole is exposed, can't optimize
JITDUMP("... V%02u is exposed\n", lclNum);
hasExposure = true;
}
else if (varDsc->lvFieldLclStart != 0)
{
// This is the promoted/undone struct case.
//
// The field start is actually the local number of the promoted local,
// use it to enumerate the fields.
const unsigned promotedLcl = varDsc->lvFieldLclStart;
LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl);
JITDUMP("...promoted-unpromoted case -- also checking exposure of "
"fields of V%02u\n",
promotedLcl);
for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt;
fieldIndex++)
{
LclVarDsc* fieldDsc =
lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex);
if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed())
{
// Promoted and not yet demoted field is exposed, can't optimize
JITDUMP("... field V%02u is exposed\n",
promotedVarDsc->lvFieldLclStart + fieldIndex);
hasExposure = true;
break;
}
}
}
}
if (hasExposure)
{
interferingArg = arg2->GetNode();
break;
}
}
else
{
JITDUMP("...arg is not a byref or implicit byref (%s)\n",
varTypeName(arg2->GetNode()->TypeGet()));
}
}
if (interferingArg != nullptr)
{
JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg),
lclNum);
}
else
{
JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
}
else
{
JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n",
argInfo->ArgCount(), argLimit);
}
}
}
}
if (hasMustCopyByrefParameter)
{
// This arg requires a struct copy. No reason to keep scanning the remaining args.
break;
}
}
}
}
return hasMustCopyByrefParameter;
}
#endif
//------------------------------------------------------------------------
// fgMorphPotentialTailCall: Attempt to morph a call that the importer has
// identified as a potential tailcall to an actual tailcall and return the
// placeholder node to use in this case.
//
// Arguments:
// call - The call to morph.
//
// Return Value:
// Returns a node to use if the call was morphed into a tailcall. If this
// function returns a node the call is done being morphed and the new node
// should be used. Otherwise the call will have been demoted to a regular call
// and should go through normal morph.
//
// Notes:
// This is called only for calls that the importer has already identified as
// potential tailcalls. It will do profitability and legality checks and
// classify which kind of tailcall we are able to (or should) do, along with
// modifying the trees to perform that kind of tailcall.
//
GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
// It should either be an explicit (i.e. tail prefixed) or an implicit tail call
assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall());
// It cannot be an inline candidate
assert(!call->IsInlineCandidate());
auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) {
#ifdef DEBUG
if (verbose)
{
printf("\nRejecting tail call in morph for call ");
printTreeID(call);
printf(": %s", reason);
if (lclNum != BAD_VAR_NUM)
{
printf(" V%02u", lclNum);
}
printf("\n");
}
#endif
// for non user funcs, we have no handles to report
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), TAILCALL_FAIL, reason);
// We have checked the candidate so demote.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
};
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
failTailCall("Might turn into an intrinsic");
return nullptr;
}
#ifdef TARGET_ARM
if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
failTailCall("Non-standard calling convention");
return nullptr;
}
#endif
if (call->IsNoReturn() && !call->IsTailPrefixedCall())
{
// Such tail calls always throw an exception and we won't be able to see current
// Caller() in the stacktrace.
failTailCall("Never returns");
return nullptr;
}
#ifdef DEBUG
if (opts.compGcChecks && (info.compRetType == TYP_REF))
{
failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, "
"invalidating tailcall opportunity");
return nullptr;
}
#endif
// We have to ensure to pass the incoming retValBuf as the
// outgoing one. Using a temp will not do as this function will
// not regain control to do the copy. This can happen when inlining
// a tailcall which also has a potential tailcall in it: the IL looks
// like we can do a tailcall, but the trees generated use a temp for the inlinee's
// result. TODO-CQ: Fix this.
if (info.compRetBuffArg != BAD_VAR_NUM)
{
noway_assert(call->TypeGet() == TYP_VOID);
GenTree* retValBuf = call->gtCallArgs->GetNode();
if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg)
{
failTailCall("Need to copy return buffer");
return nullptr;
}
}
    // We are still not sure whether it can be a tail call, because when converting
// a call to an implicit tail call, we must check that there are no locals with
// their address taken. If this is the case, we have to assume that the address
// has been leaked and the current stack frame must live until after the final
// call.
// Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note
// that lvHasLdAddrOp is much more conservative. We cannot just base it on
// IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs
// during morph stage. The reason for also checking IsAddressExposed() is that in case
// of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp.
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us
// never to be incorrect.
//
// TODO-Throughput: have a compiler level flag to indicate whether method has vars whose
// address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed()
// is set. This avoids the need for iterating through all lcl vars of the current
// method. Right now throughout the code base we are not consistently using 'set'
// method to set lvHasLdAddrOp and IsAddressExposed() flags.
bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall();
if (isImplicitOrStressTailCall && compLocallocUsed)
{
failTailCall("Localloc used");
return nullptr;
}
bool hasStructParam = false;
for (unsigned varNum = 0; varNum < lvaCount; varNum++)
{
LclVarDsc* varDsc = lvaGetDesc(varNum);
// If the method is marked as an explicit tail call we will skip the
// following three hazard checks.
// We still must check for any struct parameters and set 'hasStructParam'
// so that we won't transform the recursive tail call into a loop.
//
if (isImplicitOrStressTailCall)
{
if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Local address taken", varNum);
return nullptr;
}
if (varDsc->IsAddressExposed())
{
if (lvaIsImplicitByRefLocal(varNum))
{
// The address of the implicit-byref is a non-address use of the pointer parameter.
}
else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl))
{
// The address of the implicit-byref's field is likewise a non-address use of the pointer
// parameter.
}
else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum))
{
// This temp was used for struct promotion bookkeeping. It will not be used, and will have
// its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs.
assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl));
assert(fgGlobalMorph);
}
else
{
failTailCall("Local address taken", varNum);
return nullptr;
}
}
if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Has Struct Promoted Param", varNum);
return nullptr;
}
if (varDsc->lvPinned)
{
// A tail call removes the method from the stack, which means the pinning
// goes away for the callee. We can't allow that.
failTailCall("Has Pinned Vars", varNum);
return nullptr;
}
}
if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam)
{
hasStructParam = true;
// This prevents transforming a recursive tail call into a loop
// but doesn't prevent tail call optimization so we need to
// look at the rest of parameters.
}
}
if (!fgCheckStmtAfterTailCall())
{
failTailCall("Unexpected statements after the tail call");
return nullptr;
}
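    // See if this call can use the fast tail call (jmp epilog) mechanism; if not, failReason records why.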
const char* failReason = nullptr;
bool canFastTailCall = fgCanFastTailCall(call, &failReason);
CORINFO_TAILCALL_HELPERS tailCallHelpers;
bool tailCallViaJitHelper = false;
if (!canFastTailCall)
{
if (call->IsImplicitTailCall())
{
// Implicit or opportunistic tail calls are always dispatched via fast tail call
// mechanism and never via tail call helper for perf.
failTailCall(failReason);
return nullptr;
}
assert(call->IsTailPrefixedCall());
assert(call->tailCallInfo != nullptr);
// We do not currently handle non-standard args except for VSD stubs.
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this))
{
failTailCall(
"Method with non-standard args passed in callee trash register cannot be tail called via helper");
return nullptr;
}
// On x86 we have a faster mechanism than the general one which we use
// in almost all cases. See fgCanTailCallViaJitHelper for more information.
if (fgCanTailCallViaJitHelper())
{
tailCallViaJitHelper = true;
}
else
{
// Make sure we can get the helpers. We do this last as the runtime
// will likely be required to generate these.
CORINFO_RESOLVED_TOKEN* token = nullptr;
CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig();
unsigned flags = 0;
if (!call->tailCallInfo->IsCalli())
{
token = call->tailCallInfo->GetToken();
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_TAILCALL_IS_CALLVIRT;
}
}
if (call->gtCallThisArg != nullptr)
{
var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet();
if (thisArgType != TYP_REF)
{
flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF;
}
}
if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags,
&tailCallHelpers))
{
failTailCall("Tail call help not available");
return nullptr;
}
}
}
// Check if we can make the tailcall a loop.
bool fastTailCallToLoop = false;
#if FEATURE_TAILCALL_OPT
// TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register
// or return type is a struct that can be passed in a register.
//
// TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through
// hidden generic context param or through keep alive thisptr), then while transforming a recursive
// call to such a method requires that the generic context stored on stack slot be updated. Right now,
// fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming
// a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the
// generic type parameters of both caller and callee generic method are the same.
if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() &&
!lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet()))
{
fastTailCallToLoop = true;
}
#endif
// Ok -- now we are committed to performing a tailcall. Report the decision.
CorInfoTailCall tailCallResult;
if (fastTailCallToLoop)
{
tailCallResult = TAILCALL_RECURSIVE;
}
else if (canFastTailCall)
{
tailCallResult = TAILCALL_OPTIMIZED;
}
else
{
tailCallResult = TAILCALL_HELPER;
}
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), tailCallResult, nullptr);
// Are we currently planning to expand the gtControlExpr as an early virtual call target?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
        // It isn't always profitable to expand a virtual call early
        //
        // We always expand the TAILCALL_HELPER type late.
        // And we expand late when we have an optimized tail call
// and the this pointer needs to be evaluated into a temp.
//
if (tailCallResult == TAILCALL_HELPER)
{
            // We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work
// for us to be able to expand this earlier in morph)
//
call->ClearExpandedEarly();
}
else if ((tailCallResult == TAILCALL_OPTIMIZED) &&
((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0))
{
// We generate better code when we expand this late in lower instead.
//
call->ClearExpandedEarly();
}
}
// Now actually morph the call.
compTailCallUsed = true;
// This will prevent inlining this call.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL;
if (tailCallViaJitHelper)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER;
}
#if FEATURE_TAILCALL_OPT
if (fastTailCallToLoop)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP;
}
#endif
// Mark that this is no longer a pending tailcall. We need to do this before
// we call fgMorphCall again (which happens in the fast tailcall case) to
// avoid recursing back into this method.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
#ifdef DEBUG
if (verbose)
{
printf("\nGTF_CALL_M_TAILCALL bit set for call ");
printTreeID(call);
printf("\n");
if (fastTailCallToLoop)
{
printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call ");
printTreeID(call);
printf("\n");
}
}
#endif
// For R2R we might need a different entry point for this call if we are doing a tailcall.
// The reason is that the normal delay load helper uses the return address to find the indirection
    // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM.
// We optimize delegate invocations manually in the JIT so skip this for those.
if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke())
{
info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint);
#ifdef TARGET_XARCH
// We have already computed arg info to make the fast tailcall decision, but on X64 we now
// have to pass the indirection cell, so redo arg info.
call->ResetArgInfo();
#endif
}
// If this block has a flow successor, make suitable updates.
//
BasicBlock* const nextBlock = compCurBB->GetUniqueSucc();
if (nextBlock == nullptr)
{
// No unique successor. compCurBB should be a return.
//
assert(compCurBB->bbJumpKind == BBJ_RETURN);
}
else
{
// Flow no longer reaches nextBlock from here.
//
fgRemoveRefPred(nextBlock, compCurBB);
// Adjust profile weights.
//
// Note if this is a tail call to loop, further updates
// are needed once we install the loop edge.
//
if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight())
{
// Since we have linear flow we can update the next block weight.
//
weight_t const blockWeight = compCurBB->bbWeight;
weight_t const nextWeight = nextBlock->bbWeight;
weight_t const newNextWeight = nextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextWeight >= 0)
{
// Note if we'd already morphed the IR in nextblock we might
// have done something profile sensitive that we should arguably reconsider.
//
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum,
nextWeight, newNextWeight);
nextBlock->setBBProfileWeight(newNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight);
}
// If nextBlock is not a BBJ_RETURN, it should have a unique successor that
// is a BBJ_RETURN, as we allow a little bit of flow after a tail call.
//
if (nextBlock->bbJumpKind != BBJ_RETURN)
{
BasicBlock* retBlock = nextBlock->GetUniqueSucc();
// Check if we have a sequence of GT_ASG blocks where the same variable is assigned
// to temp locals over and over.
// Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs.
//
// { GT_ASG(t_0, GT_CALL(...)) }
// { GT_ASG(t_1, t0) } (with casts on rhs potentially)
// ...
// { GT_ASG(t_n, t_(n - 1)) }
// { GT_RET t_n }
//
if (retBlock->bbJumpKind != BBJ_RETURN)
{
// Make sure the block has a single statement
assert(nextBlock->firstStmt() == nextBlock->lastStmt());
// And the root node is "ASG(LCL_VAR, LCL_VAR)"
GenTree* asgNode = nextBlock->firstStmt()->GetRootNode();
assert(asgNode->OperIs(GT_ASG));
unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
while (retBlock->bbJumpKind != BBJ_RETURN)
{
#ifdef DEBUG
Statement* nonEmptyStmt = nullptr;
for (Statement* const stmt : retBlock->Statements())
{
// Ignore NOP statements
if (!stmt->GetRootNode()->OperIs(GT_NOP))
{
// Only a single non-NOP statement is allowed
assert(nonEmptyStmt == nullptr);
nonEmptyStmt = stmt;
}
}
if (nonEmptyStmt != nullptr)
{
asgNode = nonEmptyStmt->GetRootNode();
if (!asgNode->OperIs(GT_NOP))
{
assert(asgNode->OperIs(GT_ASG));
GenTree* rhs = asgNode->gtGetOp2();
while (rhs->OperIs(GT_CAST))
{
assert(!rhs->gtOverflow());
rhs = rhs->gtGetOp1();
}
assert(lcl == rhs->AsLclVarCommon()->GetLclNum());
lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
}
}
#endif
retBlock = retBlock->GetUniqueSucc();
}
}
assert(retBlock->bbJumpKind == BBJ_RETURN);
if (retBlock->hasProfileWeight())
{
// Do similar updates here.
//
weight_t const nextNextWeight = retBlock->bbWeight;
weight_t const newNextNextWeight = nextNextWeight - blockWeight;
                    // If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextNextWeight >= 0)
{
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, newNextNextWeight);
retBlock->setBBProfileWeight(newNextNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight);
}
}
}
}
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// We enable shared-ret tail call optimization for recursive calls even if
// FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined.
if (gtIsRecursiveCall(call))
#endif
{
// Many tailcalls will have call and ret in the same block, and thus be
// BBJ_RETURN, but if the call falls through to a ret, and we are doing a
// tailcall, change it here.
compCurBB->bbJumpKind = BBJ_RETURN;
}
GenTree* stmtExpr = fgMorphStmt->GetRootNode();
#ifdef DEBUG
// Tail call needs to be in one of the following IR forms
// Either a call stmt or
// GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..)))
// var = GT_CALL(..) or var = (GT_CAST(GT_CALL(..)))
// GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP)
// In the above,
// GT_CASTS may be nested.
genTreeOps stmtOper = stmtExpr->gtOper;
if (stmtOper == GT_CALL)
{
assert(stmtExpr == call);
}
else
{
assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
GenTree* treeWithCall;
if (stmtOper == GT_RETURN)
{
treeWithCall = stmtExpr->gtGetOp1();
}
else if (stmtOper == GT_COMMA)
{
// Second operation must be nop.
assert(stmtExpr->gtGetOp2()->IsNothingNode());
treeWithCall = stmtExpr->gtGetOp1();
}
else
{
treeWithCall = stmtExpr->gtGetOp2();
}
// Peel off casts
while (treeWithCall->gtOper == GT_CAST)
{
assert(!treeWithCall->gtOverflow());
treeWithCall = treeWithCall->gtGetOp1();
}
assert(treeWithCall == call);
}
#endif
// Store the call type for later to introduce the correct placeholder.
var_types origCallType = call->TypeGet();
GenTree* result;
if (!canFastTailCall && !tailCallViaJitHelper)
{
// For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular
// calls with (to the JIT) regular control flow so we do not need to do
// much special handling.
result = fgMorphTailCallViaHelpers(call, tailCallHelpers);
}
else
{
// Otherwise we will transform into something that does not return. For
// fast tailcalls a "jump" and for tailcall via JIT helper a call to a
// JIT helper that does not return. So peel off everything after the
// call.
Statement* nextMorphStmt = fgMorphStmt->GetNextStmt();
JITDUMP("Remove all stmts after the call.\n");
while (nextMorphStmt != nullptr)
{
Statement* stmtToRemove = nextMorphStmt;
nextMorphStmt = stmtToRemove->GetNextStmt();
fgRemoveStmt(compCurBB, stmtToRemove);
}
bool isRootReplaced = false;
GenTree* root = fgMorphStmt->GetRootNode();
if (root != call)
{
JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call));
isRootReplaced = true;
fgMorphStmt->SetRootNode(call);
}
// Avoid potential extra work for the return (for example, vzeroupper)
call->gtType = TYP_VOID;
// The runtime requires that we perform a null check on the `this` argument before
// tail calling to a virtual dispatch stub. This requirement is a consequence of limitations
// in the runtime's ability to map an AV to a NullReferenceException if
        // the AV occurs in a dispatch stub that has an unmanaged caller.
if (call->IsVirtualStub())
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
// Do some target-specific transformations (before we process the args,
// etc.) for the JIT helper case.
if (tailCallViaJitHelper)
{
fgMorphTailCallViaJitHelper(call);
// Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the
// argument list, invalidating the argInfo.
call->fgArgInfo = nullptr;
}
// Tail call via JIT helper: The VM can't use return address hijacking
// if we're not going to return and the helper doesn't have enough info
// to safely poll, so we poll before the tail call, if the block isn't
// already safe. Since tail call via helper is a slow mechanism it
        // doesn't matter whether we emit a GC poll. This is done to be in parity
        // with Jit64. Also this avoids GC info size increase if almost all
// methods are expected to be tail calls (e.g. F#).
//
// Note that we can avoid emitting GC-poll if we know that the current
// BB is dominated by a Gc-SafePoint block. But we don't have dominator
        // info at this point. One option is to just add a placeholder node for
        // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block
        // is dominated by a GC-SafePoint. For now it is not clear whether
// optimizing slow tail calls is worth the effort. As a low cost check,
// we check whether the first and current basic blocks are
// GC-SafePoints.
//
// Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead,
// fgSetBlockOrder() is going to mark the method as fully interruptible
// if the block containing this tail call is reachable without executing
// any call.
BasicBlock* curBlock = compCurBB;
if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
(fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock))
{
// We didn't insert a poll block, so we need to morph the call now
// (Normally it will get morphed when we get to the split poll block)
GenTree* temp = fgMorphCall(call);
noway_assert(temp == call);
}
// Fast tail call: in case of fast tail calls, we need a jmp epilog and
// hence mark it as BBJ_RETURN with BBF_JMP flag set.
noway_assert(compCurBB->bbJumpKind == BBJ_RETURN);
if (canFastTailCall)
{
compCurBB->bbFlags |= BBF_HAS_JMP;
}
else
{
// We call CORINFO_HELP_TAILCALL which does not return, so we will
// not need epilogue.
compCurBB->bbJumpKind = BBJ_THROW;
}
if (isRootReplaced)
{
// We have replaced the root node of this stmt and deleted the rest,
// but we still have the deleted, dead nodes on the `fgMorph*` stack
// if the root node was an `ASG`, `RET` or `CAST`.
// Return a zero con node to exit morphing of the old trees without asserts
// and forbid POST_ORDER morphing doing something wrong with our call.
var_types callType;
if (varTypeIsStruct(origCallType))
{
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference));
if (howToReturnStruct == SPK_ByValue)
{
callType = TYP_I_IMPL;
}
else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType))
{
callType = TYP_FLOAT;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
}
else
{
callType = origCallType;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
callType = genActualType(callType);
GenTree* zero = gtNewZeroConNode(callType);
result = fgMorphTree(zero);
}
else
{
result = call;
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code
// generation.
//
// Arguments:
// call - The call to transform
// helpers - The tailcall helpers provided by the runtime.
//
// Return Value:
// Returns the transformed node.
//
// Notes:
// This transforms
// GT_CALL
// {callTarget}
// {this}
// {args}
// into
// GT_COMMA
// GT_CALL StoreArgsStub
// {callTarget} (depending on flags provided by the runtime)
// {this} (as a regular arg)
// {args}
// GT_COMMA
// GT_CALL Dispatcher
// GT_ADDR ReturnAddress
// {CallTargetStub}
// GT_ADDR ReturnValue
// GT_LCL ReturnValue
// whenever the call node returns a value. If the call node does not return a
// value the last comma will not be there.
//
GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help)
{
// R2R requires different handling but we don't support tailcall via
// helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper
assert(!opts.IsReadyToRun());
JITDUMP("fgMorphTailCallViaHelpers (before):\n");
DISPTREE(call);
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We come this route only for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// We might or might not have called fgInitArgInfo before this point: in
// builds with FEATURE_FASTTAILCALL we will have called it when checking if
// we could do a fast tailcall, so it is possible we have added extra IR
// for non-standard args that we must get rid of. Get rid of that IR here
// and do this first as it will 'expose' the retbuf as the first arg, which
// we rely upon in fgCreateCallDispatcherAndGetResult.
call->ResetArgInfo();
GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher);
// Change the call to a call to the StoreArgs stub.
if (call->HasRetBufArg())
{
JITDUMP("Removing retbuf");
call->gtCallArgs = call->gtCallArgs->GetNext();
call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG;
}
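    // The runtime may require the StoreArgs stub to receive the call target explicitly (CORINFO_TAILCALL_STORE_TARGET).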
const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0;
GenTree* doBeforeStoreArgsStub = nullptr;
GenTree* thisPtrStubArg = nullptr;
// Put 'this' in normal param list
if (call->gtCallThisArg != nullptr)
{
JITDUMP("Moving this pointer into arg list\n");
GenTree* objp = call->gtCallThisArg->GetNode();
GenTree* thisPtr = nullptr;
call->gtCallThisArg = nullptr;
// JIT will need one or two copies of "this" in the following cases:
        // 1) the call needs a null check;
        // 2) StoreArgs stub needs the target function pointer address and, if the call is virtual,
        //    the stub also needs "this" in order to evaluate the target.
const bool callNeedsNullCheck = call->NeedsNullCheck();
const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual();
        // TODO-Review: The following transformation is implemented under the assumption that
        // both conditions can be true. However, I could not construct an example
        // where a virtual tail call would require a null check. If the conditions
        // turn out to be mutually exclusive, the following could be simplified.
if (callNeedsNullCheck || stubNeedsThisPtr)
{
// Clone "this" if "this" has no side effects.
if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0)
{
thisPtr = gtClone(objp, true);
}
// Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
if (thisPtr == nullptr)
{
const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
// tmp = "this"
doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp);
if (callNeedsNullCheck)
{
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet());
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck);
}
thisPtr = gtNewLclvNode(lclNum, objp->TypeGet());
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet());
}
}
else
{
if (callNeedsNullCheck)
{
// deref("this")
doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB);
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtClone(objp, true);
}
}
else
{
assert(stubNeedsThisPtr);
thisPtrStubArg = objp;
}
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr);
}
else
{
thisPtr = objp;
}
// During rationalization tmp="this" and null check will be materialized
// in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// We may need to pass the target, for instance for calli or generic methods
// where we pass instantiating stub.
if (stubNeedsTargetFnPtr)
{
JITDUMP("Adding target since VM requested it\n");
GenTree* target;
if (!call->IsVirtual())
{
if (call->gtCallType == CT_INDIRECT)
{
noway_assert(call->gtCallAddr != nullptr);
target = call->gtCallAddr;
}
else
{
CORINFO_CONST_LOOKUP addrInfo;
info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo);
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE);
if (addrInfo.accessType == IAT_VALUE)
{
handle = addrInfo.handle;
}
else if (addrInfo.accessType == IAT_PVALUE)
{
pIndirection = addrInfo.addr;
}
target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd);
}
}
else
{
assert(!call->tailCallInfo->GetSig()->hasTypeArg());
CORINFO_CALL_INFO callInfo;
unsigned flags = CORINFO_CALLINFO_LDFTN;
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_CALLINFO_CALLVIRT;
}
eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo);
target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo);
}
// Insert target as last arg
GenTreeCall::Use** newArgSlot = &call->gtCallArgs;
while (*newArgSlot != nullptr)
{
newArgSlot = &(*newArgSlot)->NextRef();
}
*newArgSlot = gtNewCallArgs(target);
}
// This is now a direct call to the store args stub and not a tailcall.
call->gtCallType = CT_USER_FUNC;
call->gtCallMethHnd = help.hStoreArgs;
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV);
// The store-args stub returns no value.
call->gtRetClsHnd = nullptr;
call->gtType = TYP_VOID;
call->gtReturnType = TYP_VOID;
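    // Sequence the IR: any 'this' setup / null check first, then the StoreArgs stub call, then the dispatcher.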
GenTree* callStoreArgsStub = call;
if (doBeforeStoreArgsStub != nullptr)
{
callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub);
}
GenTree* finalTree =
gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult);
finalTree = fgMorphTree(finalTree);
JITDUMP("fgMorphTailCallViaHelpers (after):\n");
DISPTREE(finalTree);
return finalTree;
}
//------------------------------------------------------------------------
// fgCreateCallDispatcherAndGetResult: Given a call
// CALL
// {callTarget}
// {retbuf}
// {this}
// {args}
// create a similarly typed node that calls the tailcall dispatcher and returns
// the result, as in the following:
// COMMA
// CALL TailCallDispatcher
// ADDR ReturnAddress
// &CallTargetFunc
// ADDR RetValue
// RetValue
// If the call has type TYP_VOID, only create the CALL node.
//
// Arguments:
// origCall - the call
// callTargetStubHnd - the handle of the CallTarget function (this is a special
// IL stub created by the runtime)
// dispatcherHnd - the handle of the tailcall dispatcher function
//
// Return Value:
// A node that can be used in place of the original call.
//
GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd)
{
GenTreeCall* callDispatcherNode =
gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo());
// The dispatcher has signature
// void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue)
// Add return value arg.
GenTree* retValArg;
GenTree* retVal = nullptr;
unsigned int newRetLcl = BAD_VAR_NUM;
GenTree* copyToRetBufNode = nullptr;
if (origCall->HasRetBufArg())
{
JITDUMP("Transferring retbuf\n");
GenTree* retBufArg = origCall->gtCallArgs->GetNode();
assert(info.compRetBuffArg != BAD_VAR_NUM);
assert(retBufArg->OperIsLocal());
assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg);
        // Caller return buffer argument retBufArg can point to the GC heap while the dispatcher expects
        // the return value argument retValArg to point to the stack.
        // We use a temporary stack-allocated return buffer to hold the value during the dispatcher call
// and copy the value back to the caller return buffer after that.
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer"));
constexpr bool unsafeValueClsCheck = false;
lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck);
lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet();
retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType));
var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet();
GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType);
GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr);
GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType);
constexpr bool isVolatile = false;
constexpr bool isCopyBlock = true;
copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock);
if (origCall->gtType != TYP_VOID)
{
retVal = gtClone(retBufArg);
}
}
else if (origCall->gtType != TYP_VOID)
{
JITDUMP("Creating a new temp for the return value\n");
newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher"));
if (varTypeIsStruct(origCall->gtType))
{
lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false);
}
else
{
// Since we pass a reference to the return value to the dispatcher
// we need to use the real return type so we can normalize it on
// load when we return it.
lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType;
}
lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
retValArg =
gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)));
retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType));
if (varTypeIsStruct(origCall->gtType))
{
retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv());
}
}
else
{
JITDUMP("No return value so using null pointer as arg\n");
retValArg = gtNewZeroConNode(TYP_I_IMPL);
}
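    // Args are prepended, so they are added in reverse of the dispatcher signature:
    // retValue, then callTarget, then the caller's return address slot.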
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs);
// Add callTarget
callDispatcherNode->gtCallArgs =
gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd),
callDispatcherNode->gtCallArgs);
// Add the caller's return address slot.
if (lvaRetAddrVar == BAD_VAR_NUM)
{
lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address"));
lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL;
lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
}
GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL));
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs);
GenTree* finalTree = callDispatcherNode;
if (copyToRetBufNode != nullptr)
{
finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode);
}
if (origCall->gtType == TYP_VOID)
{
return finalTree;
}
assert(retVal != nullptr);
finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal);
// The JIT seems to want to CSE this comma and messes up multi-reg ret
// values in the process. Just avoid CSE'ing this tree entirely in that
// case.
if (origCall->HasMultiRegRetVal())
{
finalTree->gtFlags |= GTF_DONT_CSE;
}
return finalTree;
}
//------------------------------------------------------------------------
// getLookupTree: get a lookup tree
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// handleFlags - flags to set on the result node
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the lookup tree
//
GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}
return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle);
}
//------------------------------------------------------------------------
// getRuntimeLookupTree: get a tree for a runtime lookup
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the runtime lookup tree
//
GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
assert(!compIsForInlining());
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be
// used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array.
if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull ||
pRuntimeLookup->testForFixup)
{
// If the first condition is true, runtime lookup tree is available only via the run-time helper function.
// TODO-CQ If the second or third condition is true, we are always using the slow path since we can't
// introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper.
// The long-term solution is to introduce a new node representing a runtime lookup, create instances
// of that node both in the importer and here, and expand the node in lower (introducing control flow if
// necessary).
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup,
getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind),
compileTimeHandle);
}
GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack));
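    // cloneTree: return a cheap clone when the tree has no global effects; otherwise spill it to a
    // temp (the assignment is queued in 'stmts') and return a use of that temp.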
auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* {
if (!((*tree)->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(*tree, true);
if (clone)
{
return clone;
}
}
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
stmts.Push(gtNewTempAssign(temp, *tree));
*tree = gtNewLclvNode(temp, lvaGetActualType(temp));
return gtNewLclvNode(temp, lvaGetActualType(temp));
};
// Apply repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
GenTree* preInd = nullptr;
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset"));
}
if (i != 0)
{
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result);
}
if (pRuntimeLookup->offsets[i] != 0)
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
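    // The testForNull/testForFixup cases were routed to the helper call above.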
assert(!pRuntimeLookup->testForNull);
if (pRuntimeLookup->indirections > 0)
{
assert(!pRuntimeLookup->testForFixup);
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
}
// Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result)))
while (!stmts.Empty())
{
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result);
}
DISPTREE(result);
return result;
}
//------------------------------------------------------------------------
// getVirtMethodPointerTree: get a tree for a virtual method pointer
//
// Arguments:
// thisPtr - tree representing `this` pointer
// pResolvedToken - pointer to the resolved token of the method
// pCallInfo - pointer to call info
//
// Return Value:
// A node representing the virtual method pointer
GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo)
{
GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true);
GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false);
GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc);
return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// getTokenHandleTree: get a handle tree for a token
//
// Arguments:
// pResolvedToken - token to get a handle for
// parent - whether parent should be imported
//
// Return Value:
//    A node representing the token handle
GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent)
{
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo);
GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for tail call via JIT helper.
*/
void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call)
{
JITDUMP("fgMorphTailCallViaJitHelper (before):\n");
DISPTREE(call);
// For the helper-assisted tail calls, we need to push all the arguments
// into a single list, and then add a few extra at the beginning or end.
//
// For x86, the tailcall helper is defined as:
//
// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
// callTarget)
//
// Note that the special arguments are on the stack, whereas the function arguments follow
// the normal convention: there might be register arguments in ECX and EDX. The stack will
// look like (highest address at the top):
// first normal stack argument
// ...
// last normal stack argument
// numberOfOldStackArgs
// numberOfNewStackArgs
// flags
// callTarget
//
// Each special arg is 4 bytes.
//
// 'flags' is a bitmask where:
// 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all
// callee-saved registers for tailcall functions. Note that the helper assumes
// that the callee-saved registers live immediately below EBP, and must have been
// pushed in this order: EDI, ESI, EBX.
// 2 == call target is a virtual stub dispatch.
//
// The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details
// on the custom calling convention.
// Check for PInvoke call types that we don't handle in codegen yet.
assert(!call->IsUnmanaged());
assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr));
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We come this route only for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// First move the 'this' pointer (if any) onto the regular arg list. We do this because
// we are going to prepend special arguments onto the argument list (for non-x86 platforms),
// and thus shift where the 'this' pointer will be passed to a later argument slot. In
// addition, for all platforms, we are going to change the call into a helper call. Our code
// generation code for handling calls to helpers does not handle 'this' pointers. So, when we
// do this transformation, we must explicitly create a null 'this' pointer check, if required,
// since special 'this' pointer handling will no longer kick in.
//
// Some call types, such as virtual vtable calls, require creating a call address expression
// that involves the "this" pointer. Lowering will sometimes create an embedded statement
// to create a temporary that is assigned to the "this" pointer expression, and then use
// that temp to create the call address expression. This temp creation embedded statement
// will occur immediately before the "this" pointer argument, and then will be used for both
// the "this" pointer argument as well as the call address expression. In the normal ordering,
// the embedded statement establishing the "this" pointer temp will execute before both uses
// of the temp. However, for tail calls via a helper, we move the "this" pointer onto the
// normal call argument list, and insert a placeholder which will hold the call address
// expression. For non-x86, things are ok, because the order of execution of these is not
// altered. However, for x86, the call address expression is inserted as the *last* argument
// in the argument list, *after* the "this" pointer. It will be put on the stack, and be
// evaluated first. To ensure we don't end up with out-of-order temp definition and use,
// for those cases where call lowering creates an embedded form temp of "this", we will
// create a temp here, early, that will later get morphed correctly.
if (call->gtCallThisArg != nullptr)
{
GenTree* thisPtr = nullptr;
GenTree* objp = call->gtCallThisArg->GetNode();
call->gtCallThisArg = nullptr;
if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR))
{
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", tmp)
var_types vt = objp->TypeGet();
GenTree* tmp = gtNewLclvNode(lclNum, vt);
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp);
objp = thisPtr;
}
if (call->NeedsNullCheck())
{
// clone "this" if "this" has no side effects.
if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT))
{
thisPtr = gtClone(objp, true);
}
var_types vt = objp->TypeGet();
if (thisPtr == nullptr)
{
// create a temp if either "this" has side effects or "this" is too complex to clone.
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, vt);
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck);
// COMMA(COMMA(tmp = "this", deref(tmp)), tmp)
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
}
else
{
// thisPtr = COMMA(deref("this"), "this")
GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB);
thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true));
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
else
{
thisPtr = objp;
}
// TODO-Cleanup: we leave it as a virtual stub call to
// use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here
// and change `LowerCall` to recognize it as a direct call.
// During rationalization tmp="this" and null check will
        // materialize as embedded stmts in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will
// append to the list.
GenTreeCall::Use** ppArg = &call->gtCallArgs;
for (GenTreeCall::Use& use : call->Args())
{
ppArg = &use.NextRef();
}
assert(ppArg != nullptr);
assert(*ppArg == nullptr);
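    // numberOfOldStackArgs: the incoming stack argument area (total arg size minus the
    // register-passed portion), in REGSIZE_BYTES units.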
unsigned nOldStkArgsWords =
(compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the flags.
// The constant will be replaced.
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg1);
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the real call target that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg0);
// It is now a varargs tail call.
call->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
call->gtFlags &= ~GTF_CALL_POP_ARGS;
// The function is responsible for doing explicit null check when it is necessary.
assert(!call->NeedsNullCheck());
JITDUMP("fgMorphTailCallViaJitHelper (after):\n");
DISPTREE(call);
}
//------------------------------------------------------------------------
// fgGetStubAddrArg: Return the virtual stub address for the given call.
//
// Notes:
// the JIT must place the address of the stub used to load the call target,
//    the "stub indirection cell", in a special call argument with a special register.
//
// Arguments:
// call - a call that needs virtual stub dispatching.
//
// Return Value:
//    addr tree with set register requirements.
//
GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call)
{
assert(call->IsVirtualStub());
GenTree* stubAddrArg;
if (call->gtCallType == CT_INDIRECT)
{
stubAddrArg = gtClone(call->gtCallAddr, true);
}
else
{
assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
ssize_t addr = ssize_t(call->gtStubCallStubAddr);
stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
}
assert(stubAddrArg != nullptr);
stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg());
return stubAddrArg;
}
//------------------------------------------------------------------------------
// fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that
// corresponds to the argument to a recursive call.
//
// Notes:
// Due to non-standard args this is not just fgArgTabEntry::argNum.
// For example, in R2R compilations we will have added a non-standard
// arg for the R2R indirection cell.
//
// Arguments:
// argTabEntry - the arg
//
unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry)
{
fgArgInfo* argInfo = call->fgArgInfo;
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
unsigned numToRemove = 0;
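    // Count the late-added non-standard args that precede this one; they do not correspond to IL parameters.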
for (unsigned i = 0; i < argCount; i++)
{
fgArgTabEntry* arg = argTable[i];
// Late added args add extra args that do not map to IL parameters and that we should not reassign.
if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate())
continue;
if (arg->argNum < argTabEntry->argNum)
numToRemove++;
}
return argTabEntry->argNum - numToRemove;
}
//------------------------------------------------------------------------------
// fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop.
//
//
// Arguments:
// block - basic block ending with a recursive fast tail call
// recursiveTailCall - recursive tail call to transform
//
// Notes:
// The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop.
void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall)
{
assert(recursiveTailCall->IsTailCallConvertibleToLoop());
Statement* lastStmt = block->lastStmt();
assert(recursiveTailCall == lastStmt->GetRootNode());
// Transform recursive tail call into a loop.
Statement* earlyArgInsertionPoint = lastStmt;
const DebugInfo& callDI = lastStmt->GetDebugInfo();
// Hoist arg setup statement for the 'this' argument.
GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg;
if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode())
{
Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt);
}
// All arguments whose trees may involve caller parameter local variables need to be assigned to temps first;
// then the temps need to be assigned to the method parameters. This is done so that the caller
// parameters are not re-assigned before call arguments depending on them are evaluated.
// tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of
// where the next temp or parameter assignment should be inserted.
// In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first
// while the second call argument (const 1) doesn't.
// Basic block before tail recursion elimination:
// ***** BB04, stmt 1 (top level)
// [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013)
// [000033] --C - G------ - \--* call void RecursiveMethod
// [000030] ------------ | / --* const int - 1
// [000031] ------------arg0 in rcx + --* +int
// [000029] ------------ | \--* lclVar int V00 arg1
// [000032] ------------arg1 in rdx \--* const int 1
//
//
// Basic block after tail recursion elimination :
// ***** BB04, stmt 1 (top level)
// [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000030] ------------ | / --* const int - 1
// [000031] ------------ | / --* +int
// [000029] ------------ | | \--* lclVar int V00 arg1
// [000050] - A---------- \--* = int
// [000049] D------N---- \--* lclVar int V02 tmp0
//
// ***** BB04, stmt 2 (top level)
// [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000052] ------------ | / --* lclVar int V02 tmp0
// [000054] - A---------- \--* = int
// [000053] D------N---- \--* lclVar int V00 arg0
// ***** BB04, stmt 3 (top level)
// [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000032] ------------ | / --* const int 1
// [000057] - A---------- \--* = int
// [000056] D------N---- \--* lclVar int V01 arg1
Statement* tmpAssignmentInsertionPoint = lastStmt;
Statement* paramAssignmentInsertionPoint = lastStmt;
// Process early args. They may contain both setup statements for late args and actual args.
// Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum
// below has the correct second argument.
int earlyArgIndex = (thisArg == nullptr) ? 0 : 1;
for (GenTreeCall::Use& use : recursiveTailCall->Args())
{
GenTree* earlyArg = use.GetNode();
if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode())
{
if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0)
{
// This is a setup node so we need to hoist it.
Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt);
}
else
{
// This is an actual argument that needs to be assigned to the corresponding caller parameter.
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
}
}
earlyArgIndex++;
}
// Process late args.
int lateArgIndex = 0;
for (GenTreeCall::Use& use : recursiveTailCall->LateArgs())
{
// A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter.
GenTree* lateArg = use.GetNode();
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
lateArgIndex++;
}
// If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that
// compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that
// block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here.
if (!info.compIsStatic && (lvaArg0Var != info.compThisArg))
{
var_types thisType = lvaTable[info.compThisArg].TypeGet();
GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType);
GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType));
Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt);
}
// If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog
// but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization
// for all non-parameter IL locals as well as temp structs with GC fields.
// Liveness phase will remove unnecessary initializations.
if (info.compInitMem || compSuppressedZeroInit)
{
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
{
#if FEATURE_FIXED_OUT_ARGS
if (varNum == lvaOutgoingArgSpaceVar)
{
continue;
}
#endif // FEATURE_FIXED_OUT_ARGS
if (!varDsc->lvIsParam)
{
var_types lclType = varDsc->TypeGet();
bool isUserLocal = (varNum < info.compLocalsCount);
bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr());
bool hadSuppressedInit = varDsc->lvSuppressedZeroInit;
if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit)
{
GenTree* lcl = gtNewLclvNode(varNum, lclType);
GenTree* init = nullptr;
if (varTypeIsStruct(lclType))
{
const bool isVolatile = false;
const bool isCopyBlock = false;
init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock);
init = fgMorphInitBlock(init);
}
else
{
GenTree* zero = gtNewZeroConNode(genActualType(lclType));
init = gtNewAssignNode(lcl, zero);
}
Statement* initStmt = gtNewStmt(init, callDI);
fgInsertStmtBefore(block, lastStmt, initStmt);
}
}
}
}
// Remove the call
fgRemoveStmt(block, lastStmt);
// Set the loop edge.
if (opts.IsOSR())
{
// Todo: this may not look like a viable loop header.
// Might need the moral equivalent of a scratch BB.
block->bbJumpDest = fgEntryBB;
}
else
{
// Ensure we have a scratch block and then target the next
// block. Loop detection needs to see a pred out of the loop,
// so mark the scratch block BBF_DONT_REMOVE to prevent empty
// block removal on it.
fgEnsureFirstBBisScratch();
fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
block->bbJumpDest = fgFirstBB->bbNext;
}
// Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
}
//------------------------------------------------------------------------------
// fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter.
//
//
// Arguments:
// arg - argument to assign
// argTabEntry - argument table entry corresponding to arg
// lclParamNum - the lcl num of the parameter
// block --- basic block the call is in
// callILOffset - IL offset of the call
// tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary)
// paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted
//
// Return Value:
// parameter assignment statement if one was inserted; nullptr otherwise.
Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint)
{
// Call arguments should be assigned to temps first and then the temps should be assigned to parameters because
// some argument trees may reference parameters directly.
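// E.g. for a recursive tail call Foo(y, x) inside Foo(x, y), assigning x = y directly would clobber the value
// of x that is still needed to compute the new value of y; staging the values through temps avoids that.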
GenTree* argInTemp = nullptr;
bool needToAssignParameter = true;
// TODO-CQ: enable calls with struct arguments passed in registers.
noway_assert(!varTypeIsStruct(arg->TypeGet()));
if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl())
{
// The argument is already assigned to a temp or is a const.
argInTemp = arg;
}
else if (arg->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = arg->AsLclVar()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (!varDsc->lvIsParam)
{
// The argument is a non-parameter local so it doesn't need to be assigned to a temp.
argInTemp = arg;
}
else if (lclNum == lclParamNum)
{
// The argument is the same parameter local that we were about to assign so
// we can skip the assignment.
needToAssignParameter = false;
}
}
// TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve
// any caller parameters. Some common cases are handled above but we may be able to eliminate
// more temp assignments.
Statement* paramAssignStmt = nullptr;
if (needToAssignParameter)
{
if (argInTemp == nullptr)
{
// The argument is not assigned to a temp. We need to create a new temp and insert an assignment.
// TODO: we can avoid a temp assignment if we can prove that the argument tree
// doesn't involve any caller parameters.
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp"));
lvaTable[tmpNum].lvType = arg->gtType;
GenTree* tempSrc = arg;
GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType);
GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc);
Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI);
fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt);
argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType);
}
// Now assign the temp to the parameter.
const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum);
assert(paramDsc->lvIsParam);
GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType);
GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp);
paramAssignStmt = gtNewStmt(paramAssignNode, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt);
}
return paramAssignStmt;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for code generation.
*/
GenTree* Compiler::fgMorphCall(GenTreeCall* call)
{
if (call->CanTailCall())
{
GenTree* newNode = fgMorphPotentialTailCall(call);
if (newNode != nullptr)
{
return newNode;
}
assert(!call->CanTailCall());
#if FEATURE_MULTIREG_RET
if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet()))
{
// The tail call has been rejected so we must finish the work deferred
// by impFixupCallStructReturn for multi-reg-returning calls and transform
// ret call
// into
// temp = call
// ret temp
// Force re-evaluating the argInfo as the return argument has changed.
call->ResetArgInfo();
// Create a new temp.
unsigned tmpNum =
lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call)."));
lvaTable[tmpNum].lvIsMultiRegRet = true;
CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd;
assert(structHandle != NO_CLASS_HANDLE);
const bool unsafeValueClsCheck = false;
lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck);
var_types structType = lvaTable[tmpNum].lvType;
GenTree* dst = gtNewLclvNode(tmpNum, structType);
GenTree* assg = gtNewAssignNode(dst, call);
assg = fgMorphTree(assg);
// Create the assignment statement and insert it before the current statement.
Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo());
fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt);
// Return the temp.
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
result->gtFlags |= GTF_DONT_CSE;
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
#ifdef DEBUG
if (verbose)
{
printf("\nInserting assignment of a multi-reg call result to a temp:\n");
gtDispStmt(assgStmt);
}
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
return result;
}
#endif
}
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)
#ifdef FEATURE_READYTORUN
|| call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
#endif
) &&
(call == fgMorphStmt->GetRootNode()))
{
// This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result.
// Transform it into a null check.
GenTree* thisPtr = call->gtCallArgs->GetNode();
GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB);
return fgMorphTree(nullCheck);
}
noway_assert(call->gtOper == GT_CALL);
//
// Only count calls once (only in the global morph phase)
//
if (fgGlobalMorph)
{
if (call->gtCallType == CT_INDIRECT)
{
optCallCount++;
optIndirectCallCount++;
}
else if (call->gtCallType == CT_USER_FUNC)
{
optCallCount++;
if (call->IsVirtual())
{
optIndirectCallCount++;
}
}
}
// Couldn't inline - remember that this BB contains method calls
// Mark the block as a GC safe point for the call if possible.
// In the event the call indicates the block isn't a GC safe point
// and the call is unmanaged with a GC transition suppression request
// then insert a GC poll.
CLANG_FORMAT_COMMENT_ANCHOR;
if (IsGcSafePoint(call))
{
compCurBB->bbFlags |= BBF_GC_SAFE_POINT;
}
// Regardless of the state of the basic block with respect to GC safe point,
// we will always insert a GC Poll for scenarios involving a suppressed GC
// transition. Only mark the block for GC Poll insertion on the first morph.
if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition())
{
compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT);
optMethodFlags |= OMF_NEEDS_GCPOLLS;
}
// Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag
//
// We need to do these before the arguments are morphed
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC))
{
// See if this is foldable
GenTree* optTree = gtFoldExprCall(call);
// If we optimized, morph the result
if (optTree != call)
{
return fgMorphTree(optTree);
}
}
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
// Process the "normal" argument list
call = fgMorphArgs(call);
noway_assert(call->gtOper == GT_CALL);
// Assign DEF flags if it produces a definition from "return buffer".
fgAssignSetVarDef(call);
if (call->OperRequiresAsgFlag())
{
call->gtFlags |= GTF_ASG;
}
// Should we expand this virtual method call target early here?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// We only expand the Vtable Call target once in the global morph phase
if (fgGlobalMorph)
{
assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once
call->gtControlExpr = fgExpandVirtualVtableCallTarget(call);
}
// We always have to morph or re-morph the control expr
//
call->gtControlExpr = fgMorphTree(call->gtControlExpr);
// Propagate any gtFlags into the call
call->gtFlags |= call->gtControlExpr->gtFlags;
}
// Morph a stelem.ref helper call that stores a null value into a direct array store that bypasses the helper.
// This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
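// Storing a null reference can never fail the array covariance check, so the helper call can safely become a
// direct element store.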
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
{
GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode();
if (value->IsIntegralConst(0))
{
assert(value->OperGet() == GT_CNS_INT);
GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode();
GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode();
// Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy
// the spill trees as well if necessary.
GenTreeOp* argSetup = nullptr;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* const arg = use.GetNode();
if (arg->OperGet() != GT_ASG)
{
continue;
}
assert(arg != arr);
assert(arg != index);
arg->gtFlags &= ~GTF_LATE_ARG;
GenTree* op1 = argSetup;
if (op1 == nullptr)
{
op1 = gtNewNothingNode();
#if DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg);
#if DEBUG
argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
#ifdef DEBUG
auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult {
(*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
return WALK_CONTINUE;
};
fgWalkTreePost(&arr, resetMorphedFlag);
fgWalkTreePost(&index, resetMorphedFlag);
fgWalkTreePost(&value, resetMorphedFlag);
#endif // DEBUG
GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index);
GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value);
GenTree* result = fgMorphTree(arrStore);
if (argSetup != nullptr)
{
result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result);
#if DEBUG
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return result;
}
}
if (call->IsNoReturn())
{
//
// If we know that the call does not return then we can set fgRemoveRestOfBlock
// to remove all subsequent statements and change the call's basic block to BBJ_THROW.
// As a result the compiler won't need to preserve live registers across the call.
//
// This isn't needed for tail calls as there shouldn't be any code after the call anyway.
// Besides, the tail call code is part of the epilog and converting the block to
// BBJ_THROW would result in the tail call being dropped as the epilog is generated
// only for BBJ_RETURN blocks.
//
if (!call->IsTailCall())
{
fgRemoveRestOfBlock = true;
}
}
return call;
}
/*****************************************************************************
*
* Expand and return the call target address for a VirtualCall
* The code here should match that generated by LowerVirtualVtableCall
*/
GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call)
{
GenTree* result;
JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
noway_assert(call->gtCallType == CT_USER_FUNC);
// get a reference to the thisPtr being passed
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0);
GenTree* thisPtr = thisArgTabEntry->GetNode();
// fgMorphArgs must enforce this invariant by creating a temp
//
assert(thisPtr->OperIsLocal());
// Make a copy of the thisPtr by cloning
//
thisPtr = gtClone(thisPtr, true);
noway_assert(thisPtr != nullptr);
// Get hold of the vtable offset
unsigned vtabOffsOfIndirection;
unsigned vtabOffsAfterIndirection;
bool isRelative;
info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
&isRelative);
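// When there is a vtable chunk indirection and the offsets are not relative, the target computed below is:
//   [[[thisPtr] + vtabOffsOfIndirection] + vtabOffsAfterIndirection]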
// Dereference the this pointer to obtain the method table, it is called vtab below
GenTree* vtab;
assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable
vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr);
vtab->gtFlags |= GTF_IND_INVARIANT;
// Get the appropriate vtable chunk
if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
{
// Note this isRelative code path is currently never executed
// as the VM doesn't ever return: isRelative == true
//
if (isRelative)
{
// MethodTable offset is a relative pointer.
//
// Additional temporary variable is used to store virtual table pointer.
// Address of method is obtained by the next computations:
//
// Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
// vtable-1st-level-indirection):
// tmp = vtab
//
// Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
// result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
//
//
// When isRelative is true we need to setup two temporary variables
// var1 = vtab
// var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
// result = [var2] + var2
//
unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab"));
unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative"));
GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab
// [tmp + vtabOffsOfIndirection]
GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false);
tmpTree1->gtFlags |= GTF_IND_NONFAULTING;
tmpTree1->gtFlags |= GTF_IND_INVARIANT;
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection
GenTree* tmpTree2 =
gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL));
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1);
GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression>
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2]
result->gtFlags |= GTF_IND_NONFAULTING;
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2
// Now stitch together the two assignment and the calculation of result into a single tree
GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result);
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree);
}
else
{
// result = [vtab + vtabOffsOfIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
}
else
{
result = vtab;
assert(!isRelative);
}
if (!isRelative)
{
// Load the function address
// result = [result + vtabOffsAfterIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL));
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
}
return result;
}
/*****************************************************************************
*
* Transform the given constant tree for code generation.
*/
GenTree* Compiler::fgMorphConst(GenTree* tree)
{
assert(tree->OperIsConst());
/* Clear any exception flags or other unnecessary flags
* that may have been set before folding this node to a constant */
tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);
if (!tree->OperIs(GT_CNS_STR))
{
return tree;
}
if (tree->AsStrCon()->IsStringEmptyField())
{
LPVOID pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
return fgMorphTree(gtNewStringLiteralNode(iat, pValue));
}
// TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will
// guarantee slow performance for that block. Instead cache the return value
// of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf.
bool useLazyStrCns = false;
if (compCurBB->bbJumpKind == BBJ_THROW)
{
useLazyStrCns = true;
}
else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall())
{
// Quick check: use the lazy helper if the root node of the current statement is a no-return call or a throw helper call.
GenTreeCall* call = compCurStmt->GetRootNode()->AsCall();
useLazyStrCns = call->IsNoReturn() || fgIsThrow(call);
}
if (useLazyStrCns)
{
CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd);
if (helper != CORINFO_HELP_UNDEF)
{
// For un-important blocks, we want to construct the string lazily
GenTreeCall::Use* args;
if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE)
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT));
}
else
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT),
gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd));
}
tree = gtNewHelperCallNode(helper, TYP_REF, args);
return fgMorphTree(tree);
}
}
assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd));
LPVOID pValue;
InfoAccessType iat =
info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue);
tree = gtNewStringLiteralNode(iat, pValue);
return fgMorphTree(tree);
}
//------------------------------------------------------------------------
// fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar.
//
// Arguments:
// obj - the obj node.
// destroyNodes -- destroy nodes that are optimized away
//
// Return value:
// GenTreeLclVar if the obj can be replaced by it, null otherwise.
//
// Notes:
// TODO-CQ: currently this transformation is done only under copy block,
//    but it is beneficial to do it for each OBJ node. However, `PUT_ARG_STACK`
// for some platforms does not expect struct `LCL_VAR` as a source, so
// it needs more work.
//
GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes)
{
if (opts.OptimizationEnabled())
{
GenTree* op1 = obj->Addr();
assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity");
if (op1->OperIs(GT_ADDR))
{
GenTreeUnOp* addr = op1->AsUnOp();
GenTree* addrOp = addr->gtGetOp1();
if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR))
{
GenTreeLclVar* lclVar = addrOp->AsLclVar();
ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout();
ClassLayout* objLayout = obj->GetLayout();
if (ClassLayout::AreCompatible(lclVarLayout, objLayout))
{
#ifdef DEBUG
CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle();
assert(objClsHandle != NO_CLASS_HANDLE);
if (verbose)
{
CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar);
printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar));
printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different"));
}
#endif
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
lclVar->gtFlags &= ~GTF_DONT_CSE;
lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE);
if (destroyNodes)
{
DEBUG_DESTROY_NODE(obj);
DEBUG_DESTROY_NODE(addr);
}
return lclVar;
}
}
}
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_LEAF tree for code generation.
*/
GenTree* Compiler::fgMorphLeaf(GenTree* tree)
{
assert(tree->OperKind() & GTK_LEAF);
if (tree->gtOper == GT_LCL_VAR)
{
const bool forceRemorph = false;
return fgMorphLocalVar(tree, forceRemorph);
}
else if (tree->gtOper == GT_LCL_FLD)
{
if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(),
tree->AsLclFld()->GetLclOffs());
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
}
else if (tree->gtOper == GT_FTN_ADDR)
{
GenTreeFptrVal* fptrValTree = tree->AsFptrVal();
// A function pointer address is being used. Let the VM know if this is the
// target of a Delegate or a raw function pointer.
bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget;
CORINFO_CONST_LOOKUP addrInfo;
#ifdef FEATURE_READYTORUN
if (fptrValTree->gtEntryPoint.addr != nullptr)
{
addrInfo = fptrValTree->gtEntryPoint;
}
else
#endif
{
info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo);
}
GenTree* indNode = nullptr;
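// IAT_VALUE means addrInfo.handle is the entry point itself, IAT_PVALUE means one indirection is needed to
// reach it, and IAT_PPVALUE means two indirections are needed.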
switch (addrInfo.accessType)
{
case IAT_PPVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true);
// Add the second indirection
indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode);
// This indirection won't cause an exception.
indNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
break;
case IAT_PVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true);
break;
case IAT_VALUE:
// Refer to gtNewIconHandleNode() as the template for constructing a constant handle
//
tree->SetOper(GT_CNS_INT);
tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
tree->gtFlags |= GTF_ICON_FTN_ADDR;
break;
default:
noway_assert(!"Unknown addrInfo.accessType");
}
if (indNode != nullptr)
{
DEBUG_DESTROY_NODE(tree);
tree = fgMorphTree(indNode);
}
}
return tree;
}
void Compiler::fgAssignSetVarDef(GenTree* tree)
{
GenTreeLclVarCommon* lclVarCmnTree;
bool isEntire = false;
if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
{
if (isEntire)
{
lclVarCmnTree->gtFlags |= GTF_VAR_DEF;
}
else
{
// We consider partial definitions to be modeled as uses followed by definitions.
// This captures the idea that preceding defs are not necessarily made redundant
// by this definition.
lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG);
}
}
}
//------------------------------------------------------------------------
// fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment
//
// Arguments:
// tree - The block assignment to be possibly morphed
//
// Return Value:
// The modified tree if successful, nullptr otherwise.
//
// Assumptions:
// 'tree' must be a block assignment.
//
// Notes:
// If successful, this method always returns the incoming tree, modifying only
// its arguments.
//
GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
{
// This must be a block assignment.
noway_assert(tree->OperIsBlkOp());
var_types asgType = tree->TypeGet();
GenTree* asg = tree;
GenTree* dest = asg->gtGetOp1();
GenTree* src = asg->gtGetOp2();
unsigned destVarNum = BAD_VAR_NUM;
LclVarDsc* destVarDsc = nullptr;
GenTree* destLclVarTree = nullptr;
bool isCopyBlock = asg->OperIsCopyBlkOp();
bool isInitBlock = !isCopyBlock;
unsigned size = 0;
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
if (dest->gtEffectiveVal()->OperIsBlk())
{
GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk();
size = lhsBlk->Size();
if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree))
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
}
if (lhsBlk->OperGet() == GT_OBJ)
{
clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
}
}
else
{
// Is this an enregisterable struct that is already a simple assignment?
// This can happen if we are re-morphing.
// Note that we won't do this straightaway if this is a SIMD type, since it
// may be a promoted lclVar (sometimes we promote the individual float fields of
// fixed-size SIMD).
if (dest->OperGet() == GT_IND)
{
noway_assert(asgType != TYP_STRUCT);
if (varTypeIsStruct(asgType))
{
destLclVarTree = fgIsIndirOfAddrOfLocal(dest);
}
if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR))
{
fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/);
dest->gtFlags |= GTF_DONT_CSE;
return tree;
}
}
else
{
noway_assert(dest->OperIsLocal());
destLclVarTree = dest;
}
if (destLclVarTree != nullptr)
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
if (asgType == TYP_STRUCT)
{
clsHnd = destVarDsc->GetStructHnd();
size = destVarDsc->lvExactSize;
}
}
if (asgType != TYP_STRUCT)
{
size = genTypeSize(asgType);
}
}
if (size == 0)
{
return nullptr;
}
if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
if (src->IsCall() || src->OperIsSIMD())
{
// Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413.
return nullptr;
}
if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
{
//
// See if we can do a simple transformation:
//
// GT_ASG <TYP_size>
// / \.
// GT_IND GT_IND or CNS_INT
// | |
// [dest] [src]
//
if (asgType == TYP_STRUCT)
{
// It is possible to use `initobj` to init a primitive type on the stack,
// like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`;
// in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)`
// and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real
// struct assignment.
if (size == REGSIZE_BYTES)
{
if (clsHnd == NO_CLASS_HANDLE)
{
// A register-sized cpblk can be treated as an integer assignment.
asgType = TYP_I_IMPL;
}
else
{
BYTE gcPtr;
info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
asgType = getJitGCType(gcPtr);
}
}
else
{
switch (size)
{
case 1:
asgType = TYP_BYTE;
break;
case 2:
asgType = TYP_SHORT;
break;
#ifdef TARGET_64BIT
case 4:
asgType = TYP_INT;
break;
#endif // TARGET_64BIT
}
}
}
}
GenTree* srcLclVarTree = nullptr;
LclVarDsc* srcVarDsc = nullptr;
if (isCopyBlock)
{
if (src->OperGet() == GT_LCL_VAR)
{
srcLclVarTree = src;
srcVarDsc = lvaGetDesc(src->AsLclVarCommon());
}
else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree))
{
srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon());
}
if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
}
if (asgType != TYP_STRUCT)
{
noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType));
// For initBlk, a non-constant source is not going to allow us to fiddle
// with the bits to create a single assignment.
// Nor do we (for now) support transforming an InitBlock of SIMD type, unless
// it is a direct assignment to a lclVar and the value is zero.
if (isInitBlock)
{
if (!src->IsConstInitVal())
{
return nullptr;
}
if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr)))
{
return nullptr;
}
}
if (destVarDsc != nullptr)
{
// Kill everything about dest
if (optLocalAssertionProp)
{
if (optAssertionCount > 0)
{
fgKillDependentAssertions(destVarNum DEBUGARG(tree));
}
}
// A previous incarnation of this code also required the local not to be
// address-exposed(=taken). That seems orthogonal to the decision of whether
// to do field-wise assignments: being address-exposed will cause it to be
// "dependently" promoted, so it will be in the right memory location. One possible
// further reason for avoiding field-wise stores is that the struct might have alignment-induced
// holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
// concern, then we could compromise and say that being address-exposed, combined with having fields that do
// not completely cover the memory of the struct, prevents field-wise assignments. The same situation exists
// for the "src" decision.
if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
return nullptr;
}
else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc)))
{
// Use the dest local var directly, as well as its type.
dest = destLclVarTree;
asgType = destVarDsc->lvType;
// If the block operation had been a write to a local var of a small int type,
// of the exact size of the small int type, and the var is NormalizeOnStore,
// we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
// have done that normalization. If we're now making it into an assignment,
// the NormalizeOnStore will work, and it can be a full def.
if (destVarDsc->lvNormalizeOnStore())
{
dest->gtFlags &= (~GTF_VAR_USEASG);
}
}
else
{
// Could be a non-promoted struct, or a floating point type local, or
// an int subject to a partial write. Don't enregister.
lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
// Mark the local var tree as a definition point of the local.
destLclVarTree->gtFlags |= GTF_VAR_DEF;
if (size < destVarDsc->lvExactSize)
{ // If it's not a full-width assignment....
destLclVarTree->gtFlags |= GTF_VAR_USEASG;
}
if (dest == destLclVarTree)
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
dest = gtNewIndir(asgType, addr);
}
}
}
// Check to ensure we don't have a reducible *(& ... )
if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR)
{
// If dest is an Indir or Block, and it has a child that is a Addr node
//
GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR
// Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'?
//
GenTree* destOp = addrNode->gtGetOp1();
var_types destOpType = destOp->TypeGet();
// We can if we have a primitive integer type and the sizes are exactly the same.
//
if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType))))
{
dest = destOp;
asgType = destOpType;
}
}
if (dest->gtEffectiveVal()->OperIsIndir())
{
// If we have no information about the destination, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
if (!fgIsIndirOfAddrOfLocal(dest))
{
dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
tree->gtFlags |= GTF_GLOB_REF;
}
dest->SetIndirExceptionFlags(this);
tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT);
}
if (isCopyBlock)
{
if (srcVarDsc != nullptr)
{
// Handled above.
assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted);
if (!varTypeIsFloating(srcLclVarTree->TypeGet()) &&
size == genTypeSize(genActualType(srcLclVarTree->TypeGet())))
{
// Use the src local var directly.
src = srcLclVarTree;
}
else
{
// The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar))
// or indir(lclVarAddr) so it must be on the stack.
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum();
lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
GenTree* srcAddr;
if (src == srcLclVarTree)
{
srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
src = gtNewOperNode(GT_IND, asgType, srcAddr);
}
else
{
assert(src->OperIsIndir());
}
}
}
if (src->OperIsIndir())
{
if (!fgIsIndirOfAddrOfLocal(src))
{
// If we have no information about the src, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
}
src->SetIndirExceptionFlags(this);
}
}
else // InitBlk
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(asgType))
{
assert(!isCopyBlock); // Else we would have returned the tree above.
noway_assert(src->IsIntegralConst(0));
noway_assert(destVarDsc != nullptr);
src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size);
}
else
#endif
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
assert(src->IsCnsIntOrI());
// This will mutate the integer constant, in place, to be the correct
// value for the type we are using in the assignment.
src->AsIntCon()->FixupInitBlkValue(asgType);
}
}
// Ensure that the dest is setup appropriately.
if (dest->gtEffectiveVal()->OperIsIndir())
{
dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/);
}
// Ensure that the rhs is setup appropriately.
if (isCopyBlock)
{
src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/);
}
// Set the lhs and rhs on the assignment.
if (dest != tree->AsOp()->gtOp1)
{
asg->AsOp()->gtOp1 = dest;
}
if (src != asg->AsOp()->gtOp2)
{
asg->AsOp()->gtOp2 = src;
}
asg->ChangeType(asgType);
dest->gtFlags |= GTF_DONT_CSE;
asg->gtFlags &= ~GTF_EXCEPT;
asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT);
// Un-set GTF_REVERSE_OPS, and it will be set later if appropriate.
asg->gtFlags &= ~GTF_REVERSE_OPS;
#ifdef DEBUG
if (verbose)
{
printf("fgMorphOneAsgBlock (after):\n");
gtDispTree(tree);
}
#endif
return tree;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree
// to a tree of promoted field initialization assignments.
//
// Arguments:
// destLclNode - The destination LclVar node
// initVal - The initialization value
// blockSize - The amount of bytes to initialize
//
// Return Value:
// A tree that performs field by field initialization of the destination
// struct variable if various conditions are met, nullptr otherwise.
//
// Notes:
// This transforms a single block initialization assignment like:
//
// * ASG struct (init)
// +--* BLK(12) struct
// | \--* ADDR long
// | \--* LCL_VAR struct(P) V02 loc0
// | \--* int V02.a (offs=0x00) -> V06 tmp3
// | \--* ubyte V02.c (offs=0x04) -> V07 tmp4
// | \--* float V02.d (offs=0x08) -> V08 tmp5
// \--* INIT_VAL int
// \--* CNS_INT int 42
//
// into a COMMA tree of assignments that initialize each promoted struct
// field:
//
// * COMMA void
// +--* COMMA void
// | +--* ASG int
// | | +--* LCL_VAR int V06 tmp3
// | | \--* CNS_INT int 0x2A2A2A2A
// | \--* ASG ubyte
// | +--* LCL_VAR ubyte V07 tmp4
// | \--* CNS_INT int 42
// \--* ASG float
// +--* LCL_VAR float V08 tmp5
// \--* CNS_DBL float 1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
assert(destLclNode->OperIs(GT_LCL_VAR));
LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
assert(varTypeIsStruct(destLclVar->TypeGet()));
assert(destLclVar->lvPromoted);
if (blockSize == 0)
{
JITDUMP(" size is zero or unknown.\n");
return nullptr;
}
if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
{
JITDUMP(" dest is address exposed and contains holes.\n");
return nullptr;
}
if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
{
// TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
JITDUMP(" dest has custom layout and contains holes.\n");
return nullptr;
}
if (destLclVar->lvExactSize != blockSize)
{
JITDUMP(" dest size mismatch.\n");
return nullptr;
}
if (!initVal->OperIs(GT_CNS_INT))
{
JITDUMP(" source is not constant.\n");
return nullptr;
}
const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;
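// E.g. an init value of 42 (0x2A) yields the pattern 0x2A2A2A2A2A2A2A2A.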
if (initPattern != 0)
{
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);
if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
{
// Cannot initialize GC or SIMD types with a non-zero constant.
// The former is completely bogus. The latter restriction could be
// lifted by supporting non-zero SIMD constants or by generating
// field initialization code that converts an integer constant to
// the appropriate SIMD value. Unlikely to be very useful, though.
JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n");
return nullptr;
}
}
}
JITDUMP(" using field by field initialization.\n");
GenTree* tree = nullptr;
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
unsigned fieldLclNum = destLclVar->lvFieldLclStart + i;
LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum);
GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet());
// If it had been labeled a "USEASG", assignments to the individual promoted fields are not.
dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG));
GenTree* src;
switch (dest->TypeGet())
{
case TYP_BOOL:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
// Promoted fields are expected to be "normalize on load". If that changes then
// we may need to adjust this code to widen the constant correctly.
assert(fieldDesc->lvNormalizeOnLoad());
FALLTHROUGH;
case TYP_INT:
{
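// E.g. with the 42 pattern above, a TYP_UBYTE field gets 0x2A (mask 0xFF) and a TYP_INT field gets
// 0x2A2A2A2A (mask 0xFFFFFFFF).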
int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1;
src = gtNewIconNode(static_cast<int32_t>(initPattern & mask));
break;
}
case TYP_LONG:
src = gtNewLconNode(initPattern);
break;
case TYP_FLOAT:
float floatPattern;
memcpy(&floatPattern, &initPattern, sizeof(floatPattern));
src = gtNewDconNode(floatPattern, dest->TypeGet());
break;
case TYP_DOUBLE:
double doublePattern;
memcpy(&doublePattern, &initPattern, sizeof(doublePattern));
src = gtNewDconNode(doublePattern, dest->TypeGet());
break;
case TYP_REF:
case TYP_BYREF:
#ifdef FEATURE_SIMD
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
case TYP_SIMD32:
#endif // FEATURE_SIMD
assert(initPattern == 0);
src = gtNewIconNode(0, dest->TypeGet());
break;
default:
unreached();
}
GenTree* asg = gtNewAssignNode(dest, src);
if (optLocalAssertionProp)
{
optAssertionGen(asg);
}
if (tree != nullptr)
{
tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
}
else
{
tree = asg;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphGetStructAddr: Gets the address of a struct object
//
// Arguments:
// pTree - the parent's pointer to the struct object node
// clsHnd - the class handle for the struct type
// isRValue - true if this is a source (not dest)
//
// Return Value:
// Returns the address of the struct value, possibly modifying the existing tree to
// sink the address below any comma nodes (this is to canonicalize for value numbering).
// If this is a source, it will morph it to an GT_IND before taking its address,
// since it may not be remorphed (and we don't want blk nodes as rvalues).
GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue)
{
GenTree* addr;
GenTree* tree = *pTree;
// If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we
// need to hang onto that for the purposes of value numbering.
if (tree->OperIsIndir())
{
if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
addr = tree->AsOp()->gtOp1;
}
else
{
if (isRValue && tree->OperIsBlk())
{
tree->ChangeOper(GT_IND);
}
addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
}
}
else if (tree->gtOper == GT_COMMA)
{
// If this is a comma, we're going to "sink" the GT_ADDR below it.
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue);
tree->gtType = TYP_BYREF;
addr = tree;
}
else
{
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_INDEX:
case GT_FIELD:
case GT_ARR_ELEM:
addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
break;
case GT_INDEX_ADDR:
addr = tree;
break;
default:
{
// TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're
// not going to use "temp"
GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd);
unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum();
lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr));
addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue);
break;
}
}
}
*pTree = addr;
return addr;
}
//------------------------------------------------------------------------
// fgMorphBlockOperand: Canonicalize an operand of a block assignment
//
// Arguments:
// tree - The block operand
// asgType - The type of the assignment
// blockWidth - The size of the block
// isBlkReqd - true iff this operand must remain a block node
//
// Return Value:
// Returns the morphed block operand
//
// Notes:
// This does the following:
// - Ensures that a struct operand is a block node or lclVar.
// - Ensures that any COMMAs are above ADDR nodes.
// Although 'tree' WAS an operand of a block assignment, the assignment
// may have been retyped to be a scalar assignment.
GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd)
{
GenTree* effectiveVal = tree->gtEffectiveVal();
if (asgType != TYP_STRUCT)
{
if (effectiveVal->OperIsIndir())
{
if (!isBlkReqd)
{
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType))
{
effectiveVal = addr->gtGetOp1();
}
else if (effectiveVal->OperIsBlk())
{
effectiveVal->SetOper(GT_IND);
}
}
effectiveVal->gtType = asgType;
}
else if (effectiveVal->TypeGet() != asgType)
{
if (effectiveVal->IsCall())
{
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
else
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
effectiveVal = gtNewIndir(asgType, addr);
}
}
}
else
{
GenTreeIndir* indirTree = nullptr;
GenTreeLclVarCommon* lclNode = nullptr;
bool needsIndirection = true;
if (effectiveVal->OperIsIndir())
{
indirTree = effectiveVal->AsIndir();
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR))
{
lclNode = addr->gtGetOp1()->AsLclVarCommon();
}
}
else if (effectiveVal->OperGet() == GT_LCL_VAR)
{
lclNode = effectiveVal->AsLclVarCommon();
}
else if (effectiveVal->IsCall())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
#ifdef TARGET_ARM64
else if (effectiveVal->OperIsHWIntrinsic())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic();
assert(intrinsic->TypeGet() == TYP_STRUCT);
assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()));
#endif
}
#endif // TARGET_ARM64
if (lclNode != nullptr)
{
const LclVarDsc* varDsc = lvaGetDesc(lclNode);
if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType))
{
if (effectiveVal != lclNode)
{
JITDUMP("Replacing block node [%06d] with lclVar V%02u\n", dspTreeID(tree), lclNode->GetLclNum());
effectiveVal = lclNode;
}
needsIndirection = false;
}
else
{
// This may be a lclVar that was determined to be address-exposed.
effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT);
}
}
if (needsIndirection)
{
if (indirTree != nullptr)
{
// If we have an indirection and a block is required, it should already be a block.
assert(indirTree->OperIsBlk() || !isBlkReqd);
effectiveVal->gtType = asgType;
}
else
{
GenTree* newTree;
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
if (isBlkReqd)
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal);
if (clsHnd == NO_CLASS_HANDLE)
{
newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth));
}
else
{
newTree = gtNewObjNode(clsHnd, addr);
gtSetObjGcInfo(newTree->AsObj());
}
}
else
{
newTree = gtNewIndir(asgType, addr);
}
effectiveVal = newTree;
}
}
}
assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal)));
tree = effectiveVal;
return tree;
}
//------------------------------------------------------------------------
// fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields.
//
// Arguments:
// lclNum1 - a promoted lclVar that is used in fieldwise assignment;
// lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM.
//
// Return Value:
// True if the second local is valid and has the same struct handle as the first,
// false otherwise.
//
// Notes:
// This check is needed to avoid accessing LCL_VARs with incorrect
// CORINFO_FIELD_HANDLE that would confuse VN optimizations.
//
bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2)
{
assert(lclNum1 != BAD_VAR_NUM);
if (lclNum2 == BAD_VAR_NUM)
{
return false;
}
const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1);
const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2);
assert(varTypeIsStruct(varDsc1));
if (!varTypeIsStruct(varDsc2))
{
return false;
}
CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd();
CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd();
assert(struct1 != NO_CLASS_HANDLE);
assert(struct2 != NO_CLASS_HANDLE);
if (struct1 != struct2)
{
return false;
}
return true;
}
// insert conversions and normalize to make tree amenable to register
// FP architectures
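// (e.g. a TYP_FLOAT operand of a TYP_DOUBLE arithmetic node gets an explicit cast to TYP_DOUBLE)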
GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
{
if (tree->OperIsArithmetic())
{
if (varTypeIsFloating(tree))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet()));
if (op1->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet());
}
if (op2->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet());
}
}
}
else if (tree->OperIsCompare())
{
GenTree* op1 = tree->AsOp()->gtOp1;
if (varTypeIsFloating(op1))
{
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op2));
if (op1->TypeGet() != op2->TypeGet())
{
// both had better be floating, just one bigger than other
if (op1->TypeGet() == TYP_FLOAT)
{
assert(op2->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_FLOAT)
{
assert(op1->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
}
}
}
return tree;
}
#ifdef FEATURE_SIMD
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
//   Check whether the field belongs to a SIMD struct. If it does, return the GenTree* for
//   the struct node, along with the base type, field index and SIMD size. If it does not, return nullptr.
//   Usually, if the tree node comes from a SIMD lclvar that is not used in any SIMD intrinsic, we
//   should return nullptr, since in that case the SIMD struct should be treated as a regular struct.
//   However, if the caller wants the SIMD struct node regardless, it can set ignoreUsedInSIMDIntrinsic
//   to true; the IsUsedInSIMDIntrinsic check is then skipped and the SIMD struct node is returned
//   whenever the struct is a SIMD struct.
//
// Arguments:
//    tree - GenTree pointer. This node will be checked to see whether it is a field that belongs to a SIMD
//           struct used in a SIMD intrinsic.
// simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut
// to simd lclvar's base JIT type.
// indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut
// equals to the index number of this field.
// simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut
// equals to the simd struct size which this tree belongs to.
// ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
// the UsedInSIMDIntrinsic check.
//
// return value:
//  A GenTree* pointing to the SIMD lclvar tree that the field belongs to. If the tree is not a
//  SIMD-intrinsic-related field, return nullptr.
//
GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic /*false*/)
{
GenTree* ret = nullptr;
if (tree->OperGet() == GT_FIELD)
{
GenTree* objRef = tree->AsField()->GetFldObj();
if (objRef != nullptr)
{
GenTree* obj = nullptr;
if (objRef->gtOper == GT_ADDR)
{
obj = objRef->AsOp()->gtOp1;
}
else if (ignoreUsedInSIMDIntrinsic)
{
obj = objRef;
}
else
{
return nullptr;
}
if (isSIMDTypeLocal(obj))
{
LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
{
*simdSizeOut = varDsc->lvExactSize;
*simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
ret = obj;
}
}
else if (obj->OperGet() == GT_SIMD)
{
ret = obj;
GenTreeSIMD* simdNode = obj->AsSIMD();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#ifdef FEATURE_HW_INTRINSICS
else if (obj->OperIsHWIntrinsic())
{
ret = obj;
GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#endif // FEATURE_HW_INTRINSICS
}
}
if (ret != nullptr)
{
var_types fieldType = tree->TypeGet();
if (fieldType == TYP_LONG)
{
// Vector2/3/4 expose public float fields while Vector<T>
// and Vector64/128/256<T> have internal ulong fields. So
// we should only ever encounter accesses for TYP_FLOAT or
// TYP_LONG and in the case of the latter we don't want the
// generic type since we are executing some algorithm on the
// raw underlying bits instead.
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG;
}
else
{
assert(fieldType == TYP_FLOAT);
}
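// E.g. the Y field of Vector3 is at offset 4 and the float base type is 4 bytes, so the element index is 1.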
unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
*indexOut = tree->AsField()->gtFldOffset / baseTypeSize;
}
return ret;
}
/*****************************************************************************
* If a read operation tries to access a SIMD struct field, transform the operation
* into a SimdGetElementNode and return the new tree. Otherwise, return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement.
* Return:
* A GenTree* which points to the new tree. If the tree is not for a SIMD intrinsic,
* the original tree is returned unchanged.
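*  For example, a read of the Y field of a Vector3 local that is used in SIMD code is
*  rewritten to a GetElement(vec, 1) intrinsic.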
*/
GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
GenTree* op2 = gtNewIconNode(index, TYP_INT);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
return tree;
}
break;
}
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE2))
{
return tree;
}
break;
}
default:
{
unreached();
}
}
#elif defined(TARGET_ARM64)
if (!compOpportunisticallyDependsOn(InstructionSet_AdvSimd))
{
return tree;
}
#endif // !TARGET_XARCH && !TARGET_ARM64
tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
return tree;
}
/*****************************************************************************
* Transform an assignment of a SIMD struct field to SimdWithElementNode, and
* return a new tree. If it is not such an assignment, then return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic set.
* Return:
* A GenTree* which points to the new tree. If the tree is not for a SIMD intrinsic,
* the original tree is returned unchanged.
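*  For example, "vec.Y = x", where vec is a Vector3 local used in SIMD code, is rewritten to
*  "vec = WithElement(vec, 1, x)".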
*/
GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
assert(tree->OperGet() == GT_ASG);
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdType = simdStructNode->gtType;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
GenTree* op2 = gtNewIconNode(index, TYP_INT);
GenTree* op3 = tree->gtGetOp2();
NamedIntrinsic intrinsicId = NI_Vector128_WithElement;
GenTree* target = gtClone(simdStructNode);
assert(target != nullptr);
GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
tree->AsOp()->gtOp1 = target;
tree->AsOp()->gtOp2 = simdTree;
// fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source
// and target have not yet been morphed.
// Therefore, in case the source and/or target are now implicit byrefs, we need to call it again.
if (fgMorphImplicitByRefArgs(tree))
{
if (tree->gtGetOp1()->OperIsBlk())
{
assert(tree->gtGetOp1()->TypeGet() == simdType);
tree->gtGetOp1()->SetOper(GT_IND);
tree->gtGetOp1()->gtType = simdType;
}
}
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
return tree;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------------
// fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3"
// for commutative operators.
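//                      For example, "(x + 4) + 8" is folded to "x + 12".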
//
// Arguments:
// tree - node to fold
//
// return value:
// A folded GenTree* instance or nullptr if something prevents folding.
//
GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree)
{
assert(varTypeIsIntegralOrI(tree->TypeGet()));
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR));
// op1 can be GT_COMMA, in this case we're going to fold
// "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))"
GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true);
genTreeOps oper = tree->OperGet();
if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() ||
op1->gtGetOp1()->IsCnsIntOrI())
{
return nullptr;
}
if (!fgGlobalMorph && (op1 != tree->gtGetOp1()))
{
// Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1)))
// don't run the optimization for such trees outside of global morph.
// Otherwise, there is a chance of violating VNs invariants and/or modifying a tree
// that is an active CSE candidate.
return nullptr;
}
if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1))
{
// The optimization removes 'tree' from IR and changes the value of 'op1'.
return nullptr;
}
if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow()))
{
return nullptr;
}
GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon();
GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon();
if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet()))
{
return nullptr;
}
if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2))
{
// The optimization removes 'cns2' from IR and changes the value of 'cns1'.
return nullptr;
}
GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2));
if (!folded->IsCnsIntOrI())
{
// Give up if we can't fold "C1 op C2"
return nullptr;
}
auto foldedCns = folded->AsIntCon();
cns1->SetIconValue(foldedCns->IconValue());
cns1->SetVNsFromNode(foldedCns);
cns1->gtFieldSeq = foldedCns->gtFieldSeq;
op1 = tree->gtGetOp1();
op1->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(cns2);
DEBUG_DESTROY_NODE(foldedCns);
INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1->AsOp();
}
//------------------------------------------------------------------------------
// fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)".
//
// Arguments:
// tree - node to fold
//
// Return Value:
// A folded GenTree* instance, or nullptr if it couldn't be folded
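//
// For example, "(int)l1 & (int)l2", where both are unchecked casts from long, can become
// "(int)(l1 & l2)", reusing the first cast node and discarding the second.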
GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree)
{
// This transform does not preserve VNs and deletes a node.
assert(fgGlobalMorph);
assert(varTypeIsIntegralOrI(tree));
assert(tree->OperIs(GT_OR, GT_AND, GT_XOR));
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
genTreeOps oper = tree->OperGet();
// see whether both ops are casts, with matching to and from types.
if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST))
{
// bail if either operand is a checked cast
if (op1->gtOverflow() || op2->gtOverflow())
{
return nullptr;
}
var_types fromType = op1->AsCast()->CastOp()->TypeGet();
var_types toType = op1->AsCast()->CastToType();
bool isUnsigned = op1->IsUnsigned();
if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) ||
(op2->IsUnsigned() != isUnsigned))
{
return nullptr;
}
/*
// Reuse gentree nodes:
//
// tree op1
// / \ |
// op1 op2 ==> tree
// | | / \.
// x y x y
//
// (op2 becomes garbage)
*/
tree->gtOp1 = op1->AsCast()->CastOp();
tree->gtOp2 = op2->AsCast()->CastOp();
tree->gtType = genActualType(fromType);
op1->gtType = genActualType(toType);
op1->AsCast()->gtOp1 = tree;
op1->AsCast()->CastToType() = toType;
op1->SetAllEffectsFlags(tree);
// no need to update isUnsigned
DEBUG_DESTROY_NODE(op2);
INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1;
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_SMPOP tree for code generation.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
{
ALLOCA_CHECK();
assert(tree->OperKind() & GTK_SMPOP);
/* The steps in this function are :
o Perform required preorder processing
o Process the first, then second operand, if any
o Perform required postorder morphing
o Perform optional postorder morphing if optimizing
*/
bool isQmarkColon = false;
AssertionIndex origAssertionCount = DUMMY_INIT(0);
AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
AssertionIndex thenAssertionCount = DUMMY_INIT(0);
AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
if (fgGlobalMorph)
{
tree = fgMorphForRegisterFP(tree);
}
genTreeOps oper = tree->OperGet();
var_types typ = tree->TypeGet();
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
/*-------------------------------------------------------------------------
* First do any PRE-ORDER processing
*/
switch (oper)
{
// Some arithmetic operators need to use a helper call to the EE
int helper;
case GT_ASG:
tree = fgDoNormalizeOnStore(tree);
/* fgDoNormalizeOnStore can change op2 */
noway_assert(op1 == tree->AsOp()->gtOp1);
op2 = tree->AsOp()->gtOp2;
#ifdef FEATURE_SIMD
if (IsBaselineSimdIsaSupported())
{
// We should check whether op2 should be assigned to a SIMD field or not.
// If it is, we should translate the tree to a SIMD intrinsic.
assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
typ = tree->TypeGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
#ifdef DEBUG
assert((tree == newTree) && (tree->OperGet() == oper));
if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
{
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
}
#endif // DEBUG
}
#endif
// We can't CSE the LHS of an assignment. Only r-values can be CSEed.
// Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
// behavior, allow CSE'ing if it is a struct type (or a TYP_REF transformed from a struct type)
// TODO-1stClassStructs: improve this.
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_ADDR:
/* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
op1->gtFlags |= GTF_DONT_CSE;
break;
case GT_QMARK:
case GT_JTRUE:
noway_assert(op1);
if (op1->OperIsCompare())
{
/* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
not need to materialize the result as a 0 or 1. */
/* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
else
{
GenTree* effOp1 = op1->gtEffectiveVal();
noway_assert((effOp1->gtOper == GT_CNS_INT) &&
(effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
}
break;
case GT_COLON:
if (optLocalAssertionProp)
{
isQmarkColon = true;
}
break;
case GT_FIELD:
return fgMorphField(tree, mac);
case GT_INDEX:
return fgMorphArrayIndex(tree);
case GT_CAST:
{
GenTree* morphedCast = fgMorphExpandCast(tree->AsCast());
if (morphedCast != nullptr)
{
return morphedCast;
}
op1 = tree->AsCast()->CastOp();
}
break;
case GT_MUL:
noway_assert(op2 != nullptr);
if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow())
{
// MUL(NEG(a), C) => MUL(a, NEG(C))
if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() &&
!op2->IsIconHandle())
{
GenTree* newOp1 = op1->gtGetOp1();
GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet());
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
tree->AsOp()->gtOp1 = newOp1;
tree->AsOp()->gtOp2 = newConst;
return fgMorphSmpOp(tree, mac);
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// For (long)int1 * (long)int2, we don't actually do the
// casts, and just multiply the 32 bit values, which will
// give us the 64 bit result in edx:eax.
if (tree->Is64RsltMul())
{
// We are seeing this node again.
// Morph only the children of casts,
// so as to avoid losing them.
tree = fgMorphLongMul(tree->AsOp());
goto DONE_MORPHING_CHILDREN;
}
tree = fgRecognizeAndMorphLongMul(tree->AsOp());
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->AsOp()->gtGetOp2();
if (tree->Is64RsltMul())
{
goto DONE_MORPHING_CHILDREN;
}
else
{
if (tree->gtOverflow())
helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
else
helper = CORINFO_HELP_LMUL;
goto USE_HELPER_FOR_ARITH;
}
}
#endif // !TARGET_64BIT
break;
case GT_ARR_LENGTH:
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
if (iconNode != nullptr)
{
INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return iconNode;
}
}
break;
case GT_DIV:
// Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two.
// Powers of two within range are always exactly represented,
// so multiplication by the reciprocal is safe in this scenario
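// For example, "x / 8.0" becomes "x * 0.125"; because 8.0 is a power of two, its
// reciprocal is exactly representable and the result is unchanged.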
if (fgGlobalMorph && op2->IsCnsFltOrDbl())
{
double divisor = op2->AsDblCon()->gtDconVal;
if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
{
oper = GT_MUL;
tree->ChangeOper(oper);
op2->AsDblCon()->gtDconVal = 1.0 / divisor;
}
}
// Convert DIV to UDIV if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_DIV));
tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_LDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_DIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // !TARGET_64BIT
break;
case GT_UDIV:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_ULDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_UDIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // TARGET_64BIT
break;
case GT_MOD:
if (varTypeIsFloating(typ))
{
helper = CORINFO_HELP_DBLREM;
noway_assert(op2);
if (op1->TypeGet() == TYP_FLOAT)
{
if (op2->TypeGet() == TYP_FLOAT)
{
helper = CORINFO_HELP_FLTREM;
}
else
{
tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
else if (op2->TypeGet() == TYP_FLOAT)
{
tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
goto USE_HELPER_FOR_ARITH;
}
// Convert MOD to UMOD if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_MOD));
tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
// Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
// A similar optimization for signed mod will not work for a negative perfectly divisible
// HI-word. To make it correct, we would need to divide without the sign and then flip the
// result sign after mod. This requires 18 opcodes + flow, making it not worth inlining.
goto ASSIGN_HELPER_FOR_MOD;
case GT_UMOD:
#ifdef TARGET_ARMARCH
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
#else // TARGET_XARCH
// If this is an unsigned long mod with a constant divisor,
// then don't morph to a helper call - it can be done faster inline using idiv.
noway_assert(op2);
if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
{
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
noway_assert(op1->TypeIs(TYP_LONG));
// Update flags for op1 morph.
tree->gtFlags &= ~GTF_ALL_EFFECT;
// Only update with op1 as op2 is a constant.
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// If op1 is a constant, then do constant folding of the division operator.
if (op1->OperIs(GT_CNS_NATIVELONG))
{
tree = gtFoldExpr(tree);
}
if (!tree->OperIsConst())
{
tree->AsOp()->CheckDivideByConstOptimized(this);
}
return tree;
}
}
#endif // TARGET_XARCH
ASSIGN_HELPER_FOR_MOD:
// For "val % 1", return 0 if op1 doesn't have any side effects
// and we are not in the CSE phase, we cannot discard 'tree'
// because it may contain CSE expressions that we haven't yet examined.
//
if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
{
if (op2->IsIntegralConst(1))
{
GenTree* zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
DEBUG_DESTROY_NODE(tree);
return zeroNode;
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
if (oper == GT_UMOD)
{
helper = CORINFO_HELP_UMOD;
goto USE_HELPER_FOR_ARITH;
}
else if (oper == GT_MOD)
{
helper = CORINFO_HELP_MOD;
goto USE_HELPER_FOR_ARITH;
}
}
#endif
#endif // !TARGET_64BIT
if (!optValnumCSE_phase)
{
#ifdef TARGET_ARM64
if (tree->OperIs(GT_UMOD) && op2->IsIntegralConstUnsignedPow2())
{
// Transformation: a % b = a & (b - 1);
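// e.g. an unsigned "a % 16" becomes "a & 15".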
tree = fgMorphUModToAndSub(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
// ARM64 architecture manual suggests this transformation
// for the mod operator.
else
#else
// XARCH only applies this transformation if we know
// that magic division will be used - which is determined
// when 'b' is not a power of 2 constant and mod operator is signed.
// Lowering for XARCH does this optimization already,
// but is also done here to take advantage of CSE.
if (tree->OperIs(GT_MOD) && op2->IsIntegralConst() && !op2->IsIntegralConstAbsPow2())
#endif
{
// Transformation: a % b = a - (a / b) * b;
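// e.g. "a % 7" becomes "a - (a / 7) * 7", exposing the division to CSE and to
// magic-number strength reduction.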
tree = fgMorphModToSubMulDiv(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
}
break;
USE_HELPER_FOR_ARITH:
{
// TODO: this comment is wrong now, do an appropriate fix.
/* We have to morph these arithmetic operations into helper calls
before morphing the arguments (preorder), else the arguments
won't get correct values of fgPtrArgCntCur.
However, try to fold the tree first in case we end up with a
simple node which won't need a helper call at all */
noway_assert(tree->OperIsBinary());
GenTree* oldTree = tree;
tree = gtFoldExpr(tree);
// Were we able to fold it ?
// Note that gtFoldExpr may return a non-leaf even if successful
// e.g. for something like "expr / 1" - see also bug #290853
if (tree->OperIsLeaf() || (oldTree != tree))
{
return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
}
// Did we fold it into a comma node with throw?
if (tree->gtOper == GT_COMMA)
{
noway_assert(fgIsCommaThrow(tree));
return fgMorphTree(tree);
}
}
return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2));
case GT_RETURN:
if (!tree->TypeIs(TYP_VOID))
{
if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND))
{
op1 = fgMorphRetInd(tree->AsUnOp());
}
if (op1->OperIs(GT_LCL_VAR))
{
// With a `genReturnBB` this `RETURN(src)` tree will be replaced by an `ASG(genReturnLocal, src)`
// and the `ASG` will be transformed into a field-by-field copy without referencing the parent
// local, if possible.
GenTreeLclVar* lclVar = op1->AsLclVar();
unsigned lclNum = lclVar->GetLclNum();
if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum))
{
LclVarDsc* varDsc = lvaGetDesc(lclVar);
if (varDsc->CanBeReplacedWithItsField(this))
{
// We can replace the struct with its only field and allow copy propagation to replace
// return value that was written as a field.
unsigned fieldLclNum = varDsc->lvFieldLclStart;
LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum);
JITDUMP("Replacing an independently promoted local var V%02u with its only field "
"V%02u for "
"the return [%06u]\n",
lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree));
lclVar->SetLclNum(fieldLclNum);
lclVar->ChangeType(fieldDsc->lvType);
}
}
}
}
// normalize small integer return values
if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) &&
fgCastNeeded(op1, info.compRetType))
{
// Small-typed return values are normalized by the callee
op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType);
// Propagate GTF_COLON_COND
op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
tree->AsOp()->gtOp1 = fgMorphTree(op1);
// Propagate side effect flags
tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1());
return tree;
}
break;
case GT_EQ:
case GT_NE:
{
GenTree* optimizedTree = gtFoldTypeCompare(tree);
if (optimizedTree != tree)
{
return fgMorphTree(optimizedTree);
}
// Pattern-matching optimization:
// (a % c) ==/!= 0
// for power-of-2 constant `c`
// =>
// a & (c - 1) ==/!= 0
// For integer `a`, even if negative.
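// e.g. "(a % 8) == 0" becomes "(a & 7) == 0", which holds even for negative 'a'.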
if (opts.OptimizationEnabled() && !optValnumCSE_phase)
{
assert(tree->OperIs(GT_EQ, GT_NE));
if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0))
{
GenTree* op1op2 = op1->AsOp()->gtOp2;
if (op1op2->IsCnsIntOrI())
{
const ssize_t modValue = op1op2->AsIntCon()->IconValue();
if (isPow2(modValue))
{
JITDUMP("\nTransforming:\n");
DISPTREE(tree);
op1->SetOper(GT_AND); // Change % => &
op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1
fgUpdateConstTreeValueNumber(op1op2);
JITDUMP("\ninto:\n");
DISPTREE(tree);
}
}
}
}
}
FALLTHROUGH;
case GT_GT:
{
// Try and optimize nullable boxes feeding compares
GenTree* optimizedTree = gtFoldBoxNullable(tree);
if (optimizedTree->OperGet() != tree->OperGet())
{
return optimizedTree;
}
else
{
tree = optimizedTree;
}
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
break;
}
case GT_RUNTIMELOOKUP:
return fgMorphTree(op1);
#ifdef TARGET_ARM
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round)
{
switch (tree->TypeGet())
{
case TYP_DOUBLE:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1));
case TYP_FLOAT:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1));
default:
unreached();
}
}
break;
#endif
case GT_PUTARG_TYPE:
return fgMorphTree(tree->AsUnOp()->gtGetOp1());
case GT_NULLCHECK:
{
op1 = tree->AsUnOp()->gtGetOp1();
if (op1->IsCall())
{
GenTreeCall* const call = op1->AsCall();
if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd)))
{
JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call));
// TODO: Can we also remove the call?
//
return fgMorphTree(call);
}
}
}
break;
default:
break;
}
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
GenTree* morphed = fgMorphReduceAddOps(tree);
if (morphed != tree)
return fgMorphTree(morphed);
}
/*-------------------------------------------------------------------------
* Process the first operand, if any
*/
if (op1)
{
// If we are entering the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can restore this state when entering the "else" part
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
origAssertionCount = optAssertionCount;
memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
origAssertionCount = 0;
origAssertionTab = nullptr;
}
}
// We might need a new MorphAddressContext context. (These are used to convey
// parent context about how addresses being calculated will be used; see the
// specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
MorphAddrContext subIndMac1(MACK_Ind);
MorphAddrContext* subMac1 = mac;
if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind)
{
switch (tree->gtOper)
{
case GT_ADDR:
// A non-null mac here implies this node is part of an address computation.
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
subMac1->m_kind = MACK_Addr;
}
break;
case GT_COMMA:
// In a comma, the incoming context only applies to the rightmost arg of the
// comma list. The left arg (op1) gets a fresh context.
subMac1 = nullptr;
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
// A non-null mac here implies this node is part of an address computation (the tree parent is
// GT_ADDR).
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
}
break;
default:
break;
}
}
// For additions, if we're in an IND context keep track of whether
// all offsets added to the address are constant, and their sum.
if (tree->gtOper == GT_ADD && subMac1 != nullptr)
{
assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
GenTree* otherOp = tree->AsOp()->gtOp2;
// Is the other operand a constant?
if (otherOp->IsCnsIntOrI())
{
ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
totalOffset += otherOp->AsIntConCommon()->IconValue();
if (totalOffset.IsOverflow())
{
// We will consider an offset so large as to overflow as "not a constant" --
// we will do a null check.
subMac1->m_allConstantOffsets = false;
}
else
{
subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
}
else
{
subMac1->m_allConstantOffsets = false;
}
}
// If op1 is a GT_FIELD or indir, we need to pass down the mac if
// its parent is GT_ADDR, since the address of op1
// is part of an ongoing address computation. Otherwise
// op1 represents the value of the field and so any address
// calculations it does are in a new context.
if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR))
{
subMac1 = nullptr;
// The impact of op1's value to any ongoing
// address computation is handled below when looking
// at op2.
}
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1);
// If we are exiting the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can merge this state with the "else" part exit
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
thenAssertionCount = optAssertionCount;
memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
thenAssertionCount = 0;
thenAssertionTab = nullptr;
}
}
/* Morphing along with folding and inlining may have changed the
* side effect flags, so we have to reset them
*
* NOTE: Don't reset the exception flags on nodes that may throw */
assert(tree->gtOper != GT_CALL);
if (!tree->OperRequiresCallFlag(this))
{
tree->gtFlags &= ~GTF_CALL;
}
/* Propagate the new flags */
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
// Similarly for clsVar
if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
{
tree->gtFlags &= ~GTF_GLOB_REF;
}
} // if (op1)
/*-------------------------------------------------------------------------
* Process the second operand, if any
*/
if (op2)
{
// If we are entering the "else" part of a Qmark-Colon we must
// reset the state of the current copy assignment table
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
optAssertionReset(0);
if (origAssertionCount)
{
size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
optAssertionReset(origAssertionCount);
}
}
// We might need a new MorphAddressContext context to use in evaluating op2.
// (These are used to convey parent context about how addresses being calculated
// will be used; see the specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
switch (tree->gtOper)
{
case GT_ADD:
if (mac != nullptr && mac->m_kind == MACK_Ind)
{
GenTree* otherOp = tree->AsOp()->gtOp1;
// Is the other operand a constant?
if (otherOp->IsCnsIntOrI())
{
mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
else
{
mac->m_allConstantOffsets = false;
}
}
break;
default:
break;
}
// If op2 is a GT_FIELD or indir, we must be taking its value,
// so it should evaluate its address in a new context.
if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir())
{
// The impact of op2's value to any ongoing
// address computation is handled above when looking
// at op1.
mac = nullptr;
}
tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac);
/* Propagate the side effect flags from op2 */
tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
// If we are exiting the "else" part of a Qmark-Colon we must
// merge the state of the current copy assignment table with
// that of the exit of the "then" part.
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
// If either exit table has zero entries then
// the merged table also has zero entries
if (optAssertionCount == 0 || thenAssertionCount == 0)
{
optAssertionReset(0);
}
else
{
size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
if ((optAssertionCount != thenAssertionCount) ||
(memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
{
// Yes they are different so we have to find the merged set
// Iterate over the copy asgn table removing any entries
// that do not have an exact match in the thenAssertionTab
AssertionIndex index = 1;
while (index <= optAssertionCount)
{
AssertionDsc* curAssertion = optGetAssertion(index);
for (unsigned j = 0; j < thenAssertionCount; j++)
{
AssertionDsc* thenAssertion = &thenAssertionTab[j];
// Do the left sides match?
if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
(curAssertion->assertionKind == thenAssertion->assertionKind))
{
// Do the right sides match?
if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
(curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
{
goto KEEP;
}
else
{
goto REMOVE;
}
}
}
//
// If we fall out of the loop above then we didn't find
// any matching entry in the thenAssertionTab so it must
// have been killed on that path so we remove it here
//
REMOVE:
// The data at optAssertionTabPrivate[index] is to be removed
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
printf("The QMARK-COLON ");
printTreeID(tree);
printf(" removes assertion candidate #%d\n", index);
}
#endif
optAssertionRemove(index);
continue;
KEEP:
// The data at optAssertionTabPrivate[index] is to be kept
index++;
}
}
}
}
} // if (op2)
#ifndef TARGET_64BIT
DONE_MORPHING_CHILDREN:
#endif // !TARGET_64BIT
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
else
{
if (tree->OperMayThrow(this))
{
// Mark the tree node as potentially throwing an exception
tree->gtFlags |= GTF_EXCEPT;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
{
tree->gtFlags &= ~GTF_EXCEPT;
}
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
{
tree->gtFlags &= ~GTF_ASG;
}
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0)))
{
tree->gtFlags &= ~GTF_CALL;
}
}
/*-------------------------------------------------------------------------
* Now do POST-ORDER processing
*/
if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
{
// The tree is really not GC but was marked as such. Now that the
// children have been unmarked, unmark the tree too.
// Remember that GT_COMMA inherits its type only from op2
if (tree->gtOper == GT_COMMA)
{
tree->gtType = genActualType(op2->TypeGet());
}
else
{
tree->gtType = genActualType(op1->TypeGet());
}
}
GenTree* oldTree = tree;
GenTree* qmarkOp1 = nullptr;
GenTree* qmarkOp2 = nullptr;
if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON))
{
qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1;
qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
}
// Try to fold it, maybe we get lucky,
tree = gtFoldExpr(tree);
if (oldTree != tree)
{
/* if gtFoldExpr returned op1 or op2 then we are done */
if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
{
return tree;
}
/* If we created a comma-throw tree then we need to morph op1 */
if (fgIsCommaThrow(tree))
{
tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
else if (tree->OperIsConst())
{
return tree;
}
/* gtFoldExpr could have used setOper to change the oper */
oper = tree->OperGet();
typ = tree->TypeGet();
/* gtFoldExpr could have changed op1 and op2 */
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
// Do we have an integer compare operation?
//
if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
{
// Are we comparing against zero?
//
if (op2->IsIntegralConst(0))
{
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
}
/*-------------------------------------------------------------------------
* Perform the required oper-specific postorder morphing
*/
GenTree* temp;
size_t ival1;
GenTree* lclVarTree;
GenTree* effectiveOp1;
FieldSeqNode* fieldSeq = nullptr;
switch (oper)
{
case GT_ASG:
if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0))
{
op1->gtFlags &= ~GTF_VAR_FOLDED_IND;
tree = fgDoNormalizeOnStore(tree);
op2 = tree->gtGetOp2();
}
lclVarTree = fgIsIndirOfAddrOfLocal(op1);
if (lclVarTree != nullptr)
{
lclVarTree->gtFlags |= GTF_VAR_DEF;
}
effectiveOp1 = op1->gtEffectiveVal();
// If we are storing a small type, we might be able to omit a cast.
if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1))
{
if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) &&
varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow())
{
var_types castType = op2->CastToType();
// If we are performing a narrowing cast and
// castType is larger or the same as op1's type
// then we can discard the cast.
if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1)))
{
tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp();
}
}
}
fgAssignSetVarDef(tree);
/* We can't CSE the LHS of an assignment */
/* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_CAST:
tree = fgOptimizeCast(tree->AsCast());
if (!tree->OperIsSimple())
{
return tree;
}
if (tree->OperIs(GT_CAST) && tree->gtOverflow())
{
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_EQ:
case GT_NE:
// It is not safe to reorder/delete CSE's
if (!optValnumCSE_phase && op2->IsIntegralConst())
{
tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp());
assert(tree->OperIsCompare());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
goto COMPARE;
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)))
{
tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
// op2's value may be changed, so it cannot be a CSE candidate.
if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2))
{
tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp());
oper = tree->OperGet();
assert(op1 == tree->AsOp()->gtGetOp1());
assert(op2 == tree->AsOp()->gtGetOp2());
}
COMPARE:
noway_assert(tree->OperIsCompare());
break;
case GT_MUL:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// This must be GTF_MUL_64RSLT
INDEBUG(tree->AsOp()->DebugCheckLongMul());
return tree;
}
#endif // TARGET_64BIT
goto CM_OVF_OP;
case GT_SUB:
if (tree->gtOverflow())
{
goto CM_OVF_OP;
}
// TODO #4104: there are a lot of other places where
// this condition is not checked before transformations.
if (fgGlobalMorph)
{
/* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
noway_assert(op2);
if (op2->IsCnsIntOrI() && !op2->IsIconHandle())
{
// Negate the constant and change the node to be "+",
// except when `op2` is a const byref.
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
noway_assert(op1);
if (op1->IsCnsIntOrI())
{
noway_assert(varTypeIsIntOrI(tree));
// The type of the new GT_NEG node cannot just be op2->TypeGet().
// Otherwise we may sign-extend incorrectly in cases where the GT_NEG
// node ends up feeding directly into a cast, for example in
// GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte))
tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2);
fgMorphTreeDone(op2);
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* No match - exit */
}
// Skip the optimization if the non-NEG operand is a constant.
// Neither op1 nor op2 is a constant here, because that was already checked above.
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
// a - -b = > a + b
// SUB(a, (NEG(b)) => ADD(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
// tree: SUB
// op1: a
// op2: NEG
// op2Child: b
GenTree* op2Child = op2->AsOp()->gtOp1; // b
oper = GT_ADD;
tree->SetOper(oper, GenTree::PRESERVE_VN);
tree->AsOp()->gtOp2 = op2Child;
DEBUG_DESTROY_NODE(op2);
op2 = op2Child;
}
// -a - -b = > b - a
// SUB(NEG(a), (NEG(b)) => SUB(b, a)
else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2))
{
// tree: SUB
// op1: NEG
// op1Child: a
// op2: NEG
// op2Child: b
GenTree* op1Child = op1->AsOp()->gtOp1; // a
GenTree* op2Child = op2->AsOp()->gtOp1; // b
tree->AsOp()->gtOp1 = op2Child;
tree->AsOp()->gtOp2 = op1Child;
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
op1 = op2Child;
op2 = op1Child;
}
}
break;
#ifdef TARGET_ARM64
case GT_DIV:
if (!varTypeIsFloating(tree->gtType))
{
// Codegen for this instruction needs to be able to throw two exceptions:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
}
break;
case GT_UDIV:
// Codegen for this instruction needs to be able to throw one exception:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
break;
#endif
case GT_ADD:
CM_OVF_OP:
if (tree->gtOverflow())
{
tree->gtRequestSetFlags();
// Add the exception-throwing basic block to jump to on overflow
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
// We can't do any commutative morphing for overflow instructions
break;
}
CM_ADD_OP:
FALLTHROUGH;
case GT_OR:
case GT_XOR:
case GT_AND:
tree = fgOptimizeCommutativeArithmetic(tree->AsOp());
if (!tree->OperIsSimple())
{
return tree;
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_NOT:
case GT_NEG:
// Remove double negation/not.
// Note: this is not a safe transformation if "tree" is a CSE candidate.
// Consider for example the following expression: NEG(NEG(OP)), where any
// NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find
// the original NEG in the statement.
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) &&
!gtIsActiveCSE_Candidate(op1))
{
JITDUMP("Remove double negation/not\n")
GenTree* op1op1 = op1->gtGetOp1();
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op1op1;
}
// Distribute negation over simple multiplication/division expressions
if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) &&
op1->OperIs(GT_MUL, GT_DIV))
{
GenTreeOp* mulOrDiv = op1->AsOp();
GenTree* op1op1 = mulOrDiv->gtGetOp1();
GenTree* op1op2 = mulOrDiv->gtGetOp2();
if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle())
{
// NEG(MUL(a, C)) => MUL(a, -C)
// NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1}
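// e.g. "-(x * 5)" becomes "x * -5", and "-(x / 5)" becomes "x / -5".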
ssize_t constVal = op1op2->AsIntCon()->IconValue();
if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) ||
(mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow()))
{
GenTree* newOp1 = op1op1; // a
GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C
mulOrDiv->gtOp1 = newOp1;
mulOrDiv->gtOp2 = newOp2;
mulOrDiv->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1op2);
return mulOrDiv;
}
}
}
/* Any constant cases should have been folded earlier */
noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
break;
case GT_CKFINITE:
noway_assert(varTypeIsFloating(op1->TypeGet()));
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN);
break;
case GT_BOUNDS_CHECK:
fgSetRngChkTarget(tree);
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
{
// If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
// the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
// is a local or CLS_VAR, even if it has been address-exposed.
if (op1->OperIs(GT_ADDR))
{
tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF);
}
if (!tree->OperIs(GT_IND))
{
break;
}
// Can not remove a GT_IND if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
bool foldAndReturnTemp = false;
temp = nullptr;
ival1 = 0;
// Don't remove a volatile GT_IND, even if the address points to a local variable.
// For TYP_STRUCT INDs, we do not know their size, and so will not morph as well.
if (!tree->AsIndir()->IsVolatile() && !tree->TypeIs(TYP_STRUCT))
{
/* Try to Fold *(&X) into X */
if (op1->gtOper == GT_ADDR)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
temp = op1->AsOp()->gtOp1; // X
if (typ == temp->TypeGet())
{
assert(typ != TYP_STRUCT);
foldAndReturnTemp = true;
}
else if (temp->OperIsLocal())
{
unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// We will try to optimize when we have a promoted struct with a zero lvFldOffset
if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
{
noway_assert(varTypeIsStruct(varDsc));
// We will try to optimize when we have a single field struct that is being struct promoted
if (varDsc->lvFieldCnt == 1)
{
unsigned lclNumFld = varDsc->lvFieldLclStart;
// just grab the promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);
// Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
// is zero
if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
{
// We can just use the existing promoted field LclNum
temp->AsLclVarCommon()->SetLclNum(lclNumFld);
temp->gtType = fieldVarDsc->TypeGet();
foldAndReturnTemp = true;
}
}
}
// If the type of the IND (typ) is a "small int", and the type of the local has the
// same width, then we can reduce to just the local variable -- it will be
// correctly normalized.
//
// The below transformation cannot be applied if the local var needs to be normalized on load.
else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
const bool possiblyStore = !definitelyLoad;
if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
{
typ = temp->TypeGet();
tree->gtType = typ;
foldAndReturnTemp = true;
if (possiblyStore)
{
// This node can be on the left-hand-side of an assignment node.
// Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
// is called on its parent in post-order morph.
temp->gtFlags |= GTF_VAR_FOLDED_IND;
}
}
}
// For matching types we can fold
else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
tree->gtType = typ = temp->TypeGet();
foldAndReturnTemp = true;
}
else
{
// Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
// nullptr)
assert(fieldSeq == nullptr);
bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
assert(b || fieldSeq == nullptr);
if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
{
// Append the field sequence, change the type.
temp->AsLclFld()->SetFieldSeq(
GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
temp->gtType = typ;
foldAndReturnTemp = true;
}
}
// Otherwise we will fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
else // !temp->OperIsLocal()
{
// We don't try to fold away the GT_IND/GT_ADDR for this case
temp = nullptr;
}
}
else if (op1->OperGet() == GT_ADD)
{
#ifdef TARGET_ARM
// Check for a misaligned floating point indirection.
if (varTypeIsFloating(typ))
{
GenTree* addOp2 = op1->AsOp()->gtGetOp2();
if (addOp2->IsCnsIntOrI())
{
ssize_t offset = addOp2->AsIntCon()->gtIconVal;
if ((offset % emitTypeSize(TYP_FLOAT)) != 0)
{
tree->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
#endif // TARGET_ARM
/* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
opts.OptimizationEnabled())
{
// No overflow arithmetic with pointers
noway_assert(!op1->gtOverflow());
temp = op1->AsOp()->gtOp1->AsOp()->gtOp1;
if (!temp->OperIsLocal())
{
temp = nullptr;
break;
}
// Can not remove the GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1))
{
break;
}
ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
// Does the address have an associated zero-offset field sequence?
FieldSeqNode* addrFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq))
{
fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
}
if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
{
noway_assert(!varTypeIsGC(temp->TypeGet()));
foldAndReturnTemp = true;
}
else
{
// The emitter can't handle large offsets
if (ival1 != (unsigned short)ival1)
{
break;
}
// The emitter can get confused by invalid offsets
if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum()))
{
break;
}
}
// Now we can fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
}
}
// At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
// - We may have a load of a local where the load has a different type than the local
// - We may have a load of a local plus an offset
//
// In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
// offset if doing so is legal. The only cases in which this transformation is illegal are if the load
// begins before the local or if the load extends beyond the end of the local (i.e. if the load is
// out-of-bounds w.r.t. the local).
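// For example, a TYP_INT load at offset 4 of an 8-byte struct local becomes a
// GT_LCL_FLD of type TYP_INT with gtLclOffs == 4, and the local is marked
// do-not-enregister.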
if ((temp != nullptr) && !foldAndReturnTemp)
{
assert(temp->OperIsLocalRead());
unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
// Make sure we do not enregister this lclVar.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
// If the size of the load is greater than the size of the lclVar, we cannot fold this access into
// a lclFld: the access represented by an lclFld node must begin at or after the start of the
// lclVar and must not extend beyond the end of the lclVar.
if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= lvaLclExactSize(lclNum)))
{
GenTreeLclFld* lclFld;
// We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival'
// or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'
// Then we change the type of the GT_LCL_FLD to match the original GT_IND type.
//
if (temp->OperGet() == GT_LCL_FLD)
{
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1));
lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq));
}
else // We have a GT_LCL_VAR.
{
assert(temp->OperGet() == GT_LCL_VAR);
temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField".
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(static_cast<unsigned>(ival1));
if (fieldSeq != nullptr)
{
// If it does represent a field, note that.
lclFld->SetFieldSeq(fieldSeq);
}
}
temp->gtType = tree->gtType;
foldAndReturnTemp = true;
}
}
if (foldAndReturnTemp)
{
assert(temp != nullptr);
assert(temp->TypeGet() == typ);
assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR));
// Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for
// 'temp' because a GT_ADDR always marks it for its operand.
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
if (op1->OperGet() == GT_ADD)
{
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT
}
DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
DEBUG_DESTROY_NODE(tree); // GT_IND
// If the result of the fold is a local var, we may need to perform further adjustments e.g. for
// normalization.
if (temp->OperIs(GT_LCL_VAR))
{
#ifdef DEBUG
// We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear
// and the node in question must have this bit set (as it has already been morphed).
temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
const bool forceRemorph = true;
temp = fgMorphLocalVar(temp, forceRemorph);
#ifdef DEBUG
// We then set this flag on `temp` because `fgMorphLocalVar` may not set it itself, and the
// caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function
// returns.
temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return temp;
}
// Only do this optimization when we are in the global optimizer. Doing this after value numbering
// could result in an invalid value number for the newly generated GT_IND node.
if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
{
// Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
// TBD: this transformation is currently necessary for correctness -- it might
// be good to analyze the failures that result if we don't do this, and fix them
// in other ways. Ideally, this should be optional.
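// e.g. IND(COMMA(x, COMMA(y, addr))) becomes COMMA(x, COMMA(y, IND(addr))).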
GenTree* commaNode = op1;
GenTreeFlags treeFlags = tree->gtFlags;
commaNode->gtType = typ;
commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS at
// least.
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA)
{
commaNode = commaNode->AsOp()->gtOp2;
commaNode->gtType = typ;
commaNode->gtFlags =
(treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at
// least.
commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) &
(GTF_ASG | GTF_CALL));
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0;
ArrayInfo arrInfo;
if (wasArrIndex)
{
bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
assert(b);
GetArrayInfoMap()->Remove(tree);
}
tree = op1;
GenTree* addr = commaNode->AsOp()->gtOp2;
// TODO-1stClassStructs: we often create a struct IND without a handle, fix it.
op1 = gtNewIndir(typ, addr);
// This is very conservative
op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING;
op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
if (wasArrIndex)
{
GetArrayInfoMap()->Set(op1, arrInfo);
}
#ifdef DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
commaNode->AsOp()->gtOp2 = op1;
commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
return tree;
}
break;
}
case GT_ADDR:
// Can not remove op1 if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
if (op1->OperGet() == GT_IND)
{
if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(IND(...)) == (...).
GenTree* addr = op1->AsOp()->gtOp1;
// If tree has a zero field sequence annotation, update the annotation
// on addr node.
FieldSeqNode* zeroFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq))
{
fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq);
}
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
}
else if (op1->OperGet() == GT_OBJ)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(OBJ(...)) == (...).
GenTree* addr = op1->AsObj()->Addr();
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
{
// Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
// (Be sure to mark "z" as an l-value...)
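// e.g. ADDR(COMMA(sideEffect, z)) becomes COMMA(sideEffect, ADDR(z)).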
ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack));
for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2())
{
commas.Push(comma);
}
GenTree* commaNode = commas.Top();
// The top-level addr might be annotated with a zeroOffset field.
FieldSeqNode* zeroFieldSeq = nullptr;
bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
tree = op1;
commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE;
// If the node we're about to put under a GT_ADDR is an indirection, it
// doesn't need to be materialized, since we only want the addressing mode. Because
// of this, this GT_IND is not a faulting indirection and we don't have to extract it
// as a side effect.
GenTree* commaOp2 = commaNode->AsOp()->gtOp2;
if (commaOp2->OperIsBlk())
{
commaOp2->SetOper(GT_IND);
}
if (commaOp2->gtOper == GT_IND)
{
commaOp2->gtFlags |= GTF_IND_NONFAULTING;
commaOp2->gtFlags &= ~GTF_EXCEPT;
commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT);
}
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
if (isZeroOffset)
{
// Transfer the annotation to the new GT_ADDR node.
fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq);
}
commaNode->AsOp()->gtOp2 = op1;
// Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
// might give op1 a type different from byref (like, say, native int). So now go back and give
// all the comma nodes the type of op1.
// TODO: the comma flag update below is conservative and can be improved.
// For example, if we made the ADDR(IND(x)) == x transformation, we may be able to
// get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF).
while (!commas.Empty())
{
GenTree* comma = commas.Pop();
comma->gtType = op1->gtType;
comma->gtFlags |= op1->gtFlags;
#ifdef DEBUG
comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
gtUpdateNodeSideEffects(comma);
}
return tree;
}
break;
case GT_COLON:
if (fgGlobalMorph)
{
/* Mark the nodes that are conditionally executed */
fgWalkTreePre(&tree, gtMarkColonCond);
}
/* Since we're doing this postorder we clear this if it got set by a child */
fgRemoveRestOfBlock = false;
break;
case GT_COMMA:
/* Special case: trees that don't produce a value */
if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2))
{
typ = tree->gtType = TYP_VOID;
}
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
//
if (!optValnumCSE_phase)
{
// Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
// is all we need.
GenTree* op1SideEffects = nullptr;
// The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
// hoisted expressions in loops.
gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
if (op1SideEffects)
{
// Replace the left hand side with the side effect list.
op1 = op1SideEffects;
tree->AsOp()->gtOp1 = op1SideEffects;
gtUpdateNodeSideEffects(tree);
}
else
{
op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op2;
}
// If the right operand is just a void nop node, throw it away. Unless this is a
// comma throw, in which case we want the top-level morphing loop to recognize it.
if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree))
{
op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
case GT_JTRUE:
/* Special case if fgRemoveRestOfBlock is set to true */
if (fgRemoveRestOfBlock)
{
if (fgIsCommaThrow(op1, true))
{
GenTree* throwNode = op1->AsOp()->gtOp1;
JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n",
dspTreeID(tree));
DEBUG_DESTROY_NODE(tree);
return throwNode;
}
noway_assert(op1->OperIsCompare());
noway_assert(op1->gtFlags & GTF_EXCEPT);
// We need to keep op1 for the side-effects. Hang it off
// a GT_COMMA node
JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree));
tree->ChangeOper(GT_COMMA);
tree->AsOp()->gtOp2 = op2 = gtNewNothingNode();
// Additionally since we're eliminating the JTRUE
// codegen won't like it if op1 is a RELOP of longs, floats or doubles.
// So we change it into a GT_COMMA as well.
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1));
op1->ChangeOper(GT_COMMA);
op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop
op1->gtType = op1->AsOp()->gtOp1->gtType;
return tree;
}
break;
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName ==
NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant)
{
// Should be expanded by the time it reaches CSE phase
assert(!optValnumCSE_phase);
JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to ");
if (op1->OperIsConst())
{
// We got lucky and caught a constant here that the importer did not
JITDUMP("true\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(1);
}
else
{
GenTree* op1SideEffects = nullptr;
gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT);
if (op1SideEffects != nullptr)
{
DEBUG_DESTROY_NODE(tree);
// Keep side-effects of op1
tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0));
JITDUMP("false with side effects:\n")
DISPTREE(tree);
}
else
{
JITDUMP("false\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(0);
}
}
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
break;
default:
break;
}
assert(oper == tree->gtOper);
// Propagate comma throws.
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON))
{
if ((op1 != nullptr) && fgIsCommaThrow(op1, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
if ((op2 != nullptr) && fgIsCommaThrow(op2, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
}
/*-------------------------------------------------------------------------
* Optional morphing is done if tree transformations is permitted
*/
if ((opts.compFlags & CLFLG_TREETRANS) == 0)
{
return tree;
}
tree = fgMorphSmpOpOptional(tree->AsOp());
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeCast: Optimizes the supplied GT_CAST tree.
//
// Tries to get rid of the cast, its operand, or the GTF_OVERFLOW flag, and
// calls "optNarrowTree". Called in post-order by "fgMorphSmpOp".
//
// Arguments:
// tree - the cast tree to optimize
//
// Return Value:
// The optimized tree (that can have any shape).
//
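// For example, a checked cast whose source range provably fits in the target type loses
// its overflow check, and "CAST<ubyte>(CAST<short>(x))" collapses to "CAST<ubyte>(x)".
//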
GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast)
{
GenTree* src = cast->CastOp();
if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src))
{
return cast;
}
// See if we can discard the cast.
if (varTypeIsIntegral(cast) && varTypeIsIntegral(src))
{
IntegralRange srcRange = IntegralRange::ForNode(src, this);
IntegralRange noOvfRange = IntegralRange::ForCastInput(cast);
if (noOvfRange.Contains(srcRange))
{
// Casting between same-sized types is a no-op,
// given we have proven this cast cannot overflow.
if (genActualType(cast) == genActualType(src))
{
return src;
}
cast->ClearOverflow();
cast->SetAllEffectsFlags(src);
// Try and see if we can make this cast into a cheaper zero-extending version.
if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive())
{
cast->SetUnsigned();
}
}
// For checked casts, we're done.
if (cast->gtOverflow())
{
return cast;
}
var_types castToType = cast->CastToType();
// For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast.
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) &&
src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD))
{
// We're changing the type here so we need to update the VN;
// in other cases we discard the cast without modifying src
// so the VN doesn't change.
src->ChangeType(castToType);
src->SetVNsFromNode(cast);
return src;
}
// Try to narrow the operand of the cast and discard the cast.
if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) &&
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false))
{
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true);
// "optNarrowTree" may leave a dead cast behind.
if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp())))
{
src = src->AsCast()->CastOp();
}
return src;
}
// Check for two consecutive casts, we may be able to discard the intermediate one.
if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow())
{
var_types dstCastToType = castToType;
var_types srcCastToType = src->AsCast()->CastToType();
// CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X).
// CAST(ushort <- CAST(short <- X)): CAST(ushort <- X).
if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType)))
{
cast->CastOp() = src->AsCast()->CastOp();
DEBUG_DESTROY_NODE(src);
}
}
}
return cast;
}
//------------------------------------------------------------------------
// fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns.
//
// Arguments:
// cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant
//
// Return Value:
// The optimized tree, "cmp" in case no optimizations were done.
// Currently only returns relop trees.
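//
// Notes:
//    Sketches of the patterns handled below: "(x + 5) == 8" becomes "x == 3",
//    "RELOP == 0" becomes the reversed relop, and "((x >> y) & 1) == 0"
//    becomes "(x & (1 << y)) == 0" when "y" is a constant.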
//
GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_EQ, GT_NE));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
// Check for "(expr +/- icon1) ==/!= (non-zero-icon2)".
if (op2->IsCnsIntOrI() && (op2->IconValue() != 0))
{
// Since this can occur repeatedly we use a while loop.
while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) &&
!op1->gtOverflow())
{
// Got it; change "x + icon1 == icon2" to "x == icon2 - icon1".
ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue();
ssize_t op2Value = op2->IconValue();
if (op1->OperIs(GT_ADD))
{
op2Value -= op1Value;
}
else
{
op2Value += op1Value;
}
op1 = op1->AsOp()->gtGetOp1();
op2->SetIconValue(static_cast<int32_t>(op2Value));
}
cmp->gtOp1 = op1;
fgUpdateConstTreeValueNumber(op2);
}
// Here we look for the following tree
//
// EQ/NE
// / \.
// op1 CNS 0/1
//
if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1))
{
ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue());
if (op1->OperIsCompare())
{
// Here we look for the following tree
//
// EQ/NE -> RELOP/!RELOP
// / \ / \.
// RELOP CNS 0/1
// / \.
//
// Note that we will remove/destroy the EQ/NE node and move
// the RELOP up into its location.
// Here we reverse the RELOP if necessary.
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ)));
if (reverse)
{
gtReverseCond(op1);
}
noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
op1->SetVNsFromNode(cmp);
DEBUG_DESTROY_NODE(cmp);
return op1;
}
//
// Now we check for a compare with the result of an '&' operator
//
// Here we look for the following transformation:
//
// EQ/NE EQ/NE
// / \ / \.
// AND CNS 0/1 -> AND CNS 0
// / \ / \.
// RSZ/RSH CNS 1 x CNS (1 << y)
// / \.
// x CNS_INT +y
if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH))
{
GenTreeOp* andOp = op1->AsOp();
GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp();
if (!rshiftOp->gtGetOp2()->IsCnsIntOrI())
{
goto SKIP;
}
ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue();
if (shiftAmount < 0)
{
goto SKIP;
}
if (!andOp->gtGetOp2()->IsIntegralConst(1))
{
goto SKIP;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if (andOp->TypeIs(TYP_INT))
{
if (shiftAmount > 31)
{
goto SKIP;
}
andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount));
// Reverse the condition if necessary.
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetIconValue(0);
}
}
else if (andOp->TypeIs(TYP_LONG))
{
if (shiftAmount > 63)
{
goto SKIP;
}
andMask->SetLngValue(1ll << shiftAmount);
// Reverse the cond if necessary
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetLngValue(0);
}
}
andOp->gtOp1 = rshiftOp->gtGetOp1();
DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2());
DEBUG_DESTROY_NODE(rshiftOp);
}
}
SKIP:
// Now check for compares with small constant longs that can be cast to int.
// Note that we filter out negative values here so that the transformations
// below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were
// we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs.
if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0))
{
return cmp;
}
if (!op1->OperIs(GT_AND))
{
// Another interesting case: cast from int.
if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow())
{
// Simply make this into an integer comparison.
cmp->gtOp1 = op1->AsCast()->CastOp();
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
// Now we perform the following optimization:
// EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) =>
// EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT)
// when the constants are sufficiently small.
// This transform cannot preserve VNs.
if (fgGlobalMorph)
{
assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND));
// Is the result of the mask effectively an INT?
GenTreeOp* andOp = op1->AsOp();
if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG))
{
return cmp;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if ((andMask->LngValue() >> 32) != 0)
{
return cmp;
}
// Now we narrow the first operand of AND to int.
if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false))
{
optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true);
}
else
{
andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT);
}
assert(andMask == andOp->gtGetOp2());
// Now replace the mask node.
andMask->BashToConst(static_cast<int32_t>(andMask->LngValue()));
// Now change the type of the AND node.
andOp->ChangeType(TYP_INT);
// Finally we replace the comparand.
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation.
//
// Recognizes comparisons against various constant operands and morphs
// them, if possible, into comparisons against zero.
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// The "cmp" tree, possibly with a modified oper.
// The second operand's constant value may be modified as well.
//
// Assumptions:
// The operands have been swapped so that any constants are on the right.
// The second operand is an integral constant.
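//
// Notes:
//    For example, an unsigned "expr >= 1" is rewritten below to "expr != 0",
//    a signed "expr <= -1" to "expr < 0", and an unsigned
//    "expr <= int.MaxValue" to a signed "expr >= 0".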
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
assert(genActualType(op1) == genActualType(op2));
genTreeOps oper = cmp->OperGet();
int64_t op2Value = op2->IntegralValue();
if (op2Value == 1)
{
// Check for "expr >= 1".
if (oper == GT_GE)
{
// Change to "expr != 0" for unsigned and "expr > 0" for signed.
oper = cmp->IsUnsigned() ? GT_NE : GT_GT;
}
// Check for "expr < 1".
else if (oper == GT_LT)
{
// Change to "expr == 0" for unsigned and "expr <= 0".
oper = cmp->IsUnsigned() ? GT_EQ : GT_LE;
}
}
// Check for "expr relop -1".
else if (!cmp->IsUnsigned() && (op2Value == -1))
{
// Check for "expr <= -1".
if (oper == GT_LE)
{
// Change to "expr < 0".
oper = GT_LT;
}
// Check for "expr > -1".
else if (oper == GT_GT)
{
// Change to "expr >= 0".
oper = GT_GE;
}
}
else if (cmp->IsUnsigned())
{
if ((oper == GT_LE) || (oper == GT_GT))
{
if (op2Value == 0)
{
// IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
// recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
// if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
// and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare; it sometimes
// occurs as a result of branch inversion.
oper = (oper == GT_LE) ? GT_EQ : GT_NE;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
// LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0).
else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) ||
((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX)))
{
oper = (oper == GT_LE) ? GT_GE : GT_LT;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
}
}
if (!cmp->OperIs(oper))
{
// Keep the old ValueNumber for 'tree' as the new expr
// will still compute the same value as before.
cmp->SetOper(oper, GenTree::PRESERVE_VN);
op2->SetIntegralValue(0);
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// fgOptimizeHWIntrinsic: optimize a HW intrinsic node
//
// Arguments:
// node - HWIntrinsic node to examine
//
// Returns:
//    The original node if no optimization happened or if tree bashing occurred.
// An alternative tree if an optimization happened.
//
// Notes:
// Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create,
//    and if the node is one of these, attempts to optimize.
// This is post-order, meaning that it will not morph the children.
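//
//    For example, "Vector128.Create(0, 0, 0, 0)" (all arguments constant zero)
//    is rewritten to "Vector128.get_Zero" below.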
//
GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
{
assert(!optValnumCSE_phase);
if (opts.OptimizationDisabled())
{
return node;
}
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
#endif
{
bool hwAllArgsAreConstZero = true;
for (GenTree* arg : node->Operands())
{
if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero())
{
hwAllArgsAreConstZero = false;
break;
}
}
if (hwAllArgsAreConstZero)
{
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
{
node->ResetHWIntrinsicId(NI_Vector128_get_Zero);
break;
}
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
{
node->ResetHWIntrinsicId(NI_Vector256_get_Zero);
break;
}
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
{
node->ResetHWIntrinsicId(NI_Vector64_get_Zero);
break;
}
#endif
default:
unreached();
}
}
break;
}
default:
break;
}
return node;
}
#endif
//------------------------------------------------------------------------
// fgOptimizeCommutativeArithmetic: Optimizes commutative operations.
//
// Arguments:
// tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize.
//
// Return Value:
// The optimized tree that can have any shape.
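//
// Notes:
//    Constants are commuted to the second operand first (e.g. "ADD(1, x)"
//    becomes "ADD(x, 1)"), then rotation recognition, cast-of-bitwise-op
//    folding and the oper-specific folds below are attempted.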
//
GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree)
{
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND));
assert(!tree->gtOverflowEx());
// Commute constants to the right.
if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF))
{
// TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))".
// This may indicate a missed "remorph". Task is to re-enable this assertion and investigate.
std::swap(tree->gtOp1, tree->gtOp2);
}
if (fgOperIsBitwiseRotationRoot(tree->OperGet()))
{
GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree);
if (rotationTree != nullptr)
{
return rotationTree;
}
}
if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR))
{
GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp());
if (castTree != nullptr)
{
return castTree;
}
}
if (varTypeIsIntegralOrI(tree))
{
genTreeOps oldTreeOper = tree->OperGet();
GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp());
if (optimizedTree != nullptr)
{
if (!optimizedTree->OperIs(oldTreeOper))
{
// "optimizedTree" could end up being a COMMA.
return optimizedTree;
}
tree = optimizedTree;
}
}
if (!optValnumCSE_phase)
{
GenTree* optimizedTree = nullptr;
if (tree->OperIs(GT_ADD))
{
optimizedTree = fgOptimizeAddition(tree);
}
else if (tree->OperIs(GT_MUL))
{
optimizedTree = fgOptimizeMultiply(tree);
}
else if (tree->OperIs(GT_AND))
{
optimizedTree = fgOptimizeBitwiseAnd(tree);
}
else if (tree->OperIs(GT_XOR))
{
optimizedTree = fgOptimizeBitwiseXor(tree);
}
if (optimizedTree != nullptr)
{
return optimizedTree;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeAddition: optimizes addition.
//
// Arguments:
// add - the unchecked GT_ADD tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
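//
// Notes:
//    Examples of the folds below: "(x + 1) + (y + 2)" => "(x + y) + 3",
//    "x + 0" => "x", "(-a) + b" => "b - a", and "a + (-b)" => "a - b".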
//
GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
{
assert(add->OperIs(GT_ADD) && !add->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = add->gtGetOp1();
GenTree* op2 = add->gtGetOp2();
// Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))".
// Be careful not to create a byref pointer that may point outside of the ref object.
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2".
if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() &&
op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() &&
!varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph)
{
GenTreeOp* addOne = op1->AsOp();
GenTreeOp* addTwo = op2->AsOp();
GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon();
GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon();
addOne->gtOp2 = addTwo->gtGetOp1();
addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2());
DEBUG_DESTROY_NODE(addTwo);
constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue());
op2 = constOne;
add->gtOp2 = constOne;
DEBUG_DESTROY_NODE(constTwo);
}
// Fold (x + 0) - given it won't change the tree type to TYP_REF.
// TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)".
if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF)))
{
if (op2->IsCnsIntOrI() && varTypeIsI(op1))
{
fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq);
}
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(add);
return op1;
}
// Reduce local addresses: ADD(ADDR(LCL_VAR), OFFSET) => ADDR(LCL_FLD OFFSET).
// TODO-ADDR: do ADD(LCL_FLD/VAR_ADDR, OFFSET) => LCL_FLD_ADDR instead.
//
if (opts.OptimizationEnabled() && fgGlobalMorph && op1->OperIs(GT_ADDR) && op2->IsCnsIntOrI() &&
op1->AsUnOp()->gtGetOp1()->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
GenTreeUnOp* addrNode = op1->AsUnOp();
GenTreeLclVarCommon* lclNode = addrNode->gtGetOp1()->AsLclVarCommon();
GenTreeIntCon* offsetNode = op2->AsIntCon();
if (FitsIn<uint16_t>(offsetNode->IconValue()))
{
unsigned offset = lclNode->GetLclOffs() + static_cast<uint16_t>(offsetNode->IconValue());
// Note: the emitter does not expect out-of-bounds access for LCL_FLD_ADDR.
if (FitsIn<uint16_t>(offset) && (offset < lvaLclExactSize(lclNode->GetLclNum())))
{
// Compose the field sequence: [LCL, ADDR, OFFSET].
FieldSeqNode* fieldSeq = lclNode->GetFieldSeq();
FieldSeqNode* zeroOffsetFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(addrNode, &zeroOffsetFieldSeq))
{
fieldSeq = GetFieldSeqStore()->Append(fieldSeq, zeroOffsetFieldSeq);
GetZeroOffsetFieldMap()->Remove(addrNode);
}
fieldSeq = GetFieldSeqStore()->Append(fieldSeq, offsetNode->gtFieldSeq);
// Types of location nodes under ADDRs do not matter. We arbitrarily choose TYP_UBYTE.
lclNode->ChangeType(TYP_UBYTE);
lclNode->SetOper(GT_LCL_FLD);
lclNode->AsLclFld()->SetLclOffs(offset);
lclNode->AsLclFld()->SetFieldSeq(fieldSeq);
lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
DEBUG_DESTROY_NODE(offsetNode);
DEBUG_DESTROY_NODE(add);
return addrNode;
}
}
}
// Note that these transformations are legal for floating-point ADDs as well.
if (opts.OptimizationEnabled())
{
// - a + b = > b - a
// ADD((NEG(a), b) => SUB(b, a)
// Do not do this if "op2" is constant for canonicalization purposes.
if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2))
{
add->SetOper(GT_SUB);
add->gtOp1 = op2;
add->gtOp2 = op1->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op1);
return add;
}
// a + -b = > a - b
// ADD(a, (NEG(b)) => SUB(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
add->SetOper(GT_SUB);
add->gtOp2 = op2->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op2);
return add;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeMultiply: optimizes multiplication.
//
// Arguments:
// mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
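//
// Notes:
//    Examples of the folds below: "x * 1.0" => "x", "x * 2.0" => "x + x",
//    "x * 8" => "x << 3", and, on xarch, "x * 12" => "(x * 3) << 2".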
//
GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul));
assert(!mul->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
assert(mul->TypeGet() == genActualType(op1));
assert(mul->TypeGet() == genActualType(op2));
if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl())
{
double multiplierValue = op2->AsDblCon()->gtDconVal;
if (multiplierValue == 1.0)
{
// Fold "x * 1.0" to "x".
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Fold "x * 2.0" to "x + x".
// If op1 is not a local we will have to introduce a temporary via GT_COMMA.
// Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do
// this for locals / after hoisting has run (when rationalization remorphs
// math INTRINSICs into calls...).
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear)))
{
op2 = fgMakeMultiUse(&op1);
GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2);
INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return add;
}
}
if (op2->IsIntegralConst())
{
ssize_t mult = op2->AsIntConCommon()->IconValue();
bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq();
assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->GetNext() == nullptr);
if (mult == 0)
{
// We may be able to throw away op1 (unless it has side-effects)
if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
{
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(mul);
return op2; // Just return the "0" node
}
// We need to keep op1 for the side-effects. Hang it off a GT_COMMA node.
mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN);
return mul;
}
#ifdef TARGET_XARCH
// Should we try to replace integer multiplication with lea/add/shift sequences?
bool mulShiftOpt = compCodeOpt() != SMALL_CODE;
#else // !TARGET_XARCH
bool mulShiftOpt = false;
#endif // !TARGET_XARCH
size_t abs_mult = (mult >= 0) ? mult : -mult;
size_t lowestBit = genFindLowestBit(abs_mult);
bool changeToShift = false;
// is it a power of two? (positive or negative)
if (abs_mult == lowestBit)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
// If "op2" is a constant array index, the other multiplicand must be a constant.
// Transfer the annotation to the other one.
if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr &&
op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(op2->AsIntCon()->gtFieldSeq->GetNext() == nullptr);
GenTree* otherOp = op1;
if (otherOp->OperGet() == GT_NEG)
{
otherOp = otherOp->AsOp()->gtOp1;
}
assert(otherOp->OperGet() == GT_CNS_INT);
assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField());
otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq;
}
if (abs_mult == 1)
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Change the multiplication into a shift by log2(val) bits.
op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult));
changeToShift = true;
}
else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit))
{
int shift = genLog2(lowestBit);
ssize_t factor = abs_mult >> shift;
if (factor == 3 || factor == 5 || factor == 9)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet());
if (op2IsConstIndex)
{
factorIcon->AsIntCon()->gtFieldSeq =
GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField);
}
// change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
op2->AsIntConCommon()->SetIconValue(shift);
changeToShift = true;
}
}
if (changeToShift)
{
fgUpdateConstTreeValueNumber(op2);
mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN);
return mul;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseAnd: optimizes the "and" operation.
//
// Arguments:
// andOp - the GT_AND tree to optimize.
//
// Return Value:
// The optimized tree, currently always a relop, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp)
{
assert(andOp->OperIs(GT_AND));
assert(!optValnumCSE_phase);
GenTree* op1 = andOp->gtGetOp1();
GenTree* op2 = andOp->gtGetOp2();
// Fold "cmp & 1" to just "cmp".
if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(andOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against
// various cast operands and tries to remove them. E.g.:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CNS_INT long
//
// to:
//
// * GE_un int
// +--* X int
// \--* CNS_INT int
//
// same for:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CAST long <- [u]long <- int
// \--* ARR_LEN int
//
// These patterns quite often show up along with index checks
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// Returns the same tree where operands might have narrower types
//
// Notes:
// TODO-Casts: consider unifying this function with "optNarrowTree"
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTree* op2 = cmp->gtGetOp2();
// Caller is expected to call this function only if we have CAST nodes
assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST));
if (!op1->TypeIs(TYP_LONG))
{
// We can extend this logic to handle small types as well, but currently it's done mostly to
// assist range check elimination
return cmp;
}
GenTree* castOp;
GenTree* knownPositiveOp;
bool knownPositiveIsOp2;
if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))))
{
// op2 is either a LONG constant or (T)ARR_LENGTH
knownPositiveIsOp2 = true;
castOp = cmp->gtGetOp1();
knownPositiveOp = cmp->gtGetOp2();
}
else
{
// op1 is either a LONG constant (yes, it's pretty normal for relops)
// or (T)ARR_LENGTH
castOp = cmp->gtGetOp2();
knownPositiveOp = cmp->gtGetOp1();
knownPositiveIsOp2 = false;
}
if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) &&
castOp->IsUnsigned() && !castOp->gtOverflow())
{
bool knownPositiveFitsIntoU32 = false;
if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue()))
{
// BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX.
knownPositiveFitsIntoU32 = true;
}
else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) &&
knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))
{
knownPositiveFitsIntoU32 = true;
// TODO-Casts: recognize Span.Length here as well.
}
if (!knownPositiveFitsIntoU32)
{
return cmp;
}
JITDUMP("Removing redundant cast(s) for:\n")
DISPTREE(cmp)
JITDUMP("\n\nto:\n\n")
cmp->SetUnsigned();
// Drop cast from castOp
if (knownPositiveIsOp2)
{
cmp->gtOp1 = castOp->AsCast()->CastOp();
}
else
{
cmp->gtOp2 = castOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(castOp);
if (knownPositiveOp->OperIs(GT_CAST))
{
// Drop cast from knownPositiveOp too
if (knownPositiveIsOp2)
{
cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp();
}
else
{
cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(knownPositiveOp);
}
else
{
// Change type for constant from LONG to INT
knownPositiveOp->ChangeType(TYP_INT);
#ifndef TARGET_64BIT
assert(knownPositiveOp->OperIs(GT_CNS_LNG));
knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue()));
#endif
fgUpdateConstTreeValueNumber(knownPositiveOp);
}
DISPTREE(cmp)
JITDUMP("\n")
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseXor: optimizes the "xor" operation.
//
// Arguments:
// xorOp - the GT_XOR tree to optimize.
//
// Return Value:
//    The optimized tree, in case any transformations were performed.
//    Otherwise, "nullptr", guaranteeing no state change.
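//
// Notes:
//    Examples: "x ^ 0" => "x", "x ^ -1" => "NOT(x)", and "relop ^ 1" => the
//    reversed relop.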
//
GenTree* Compiler::fgOptimizeBitwiseXor(GenTreeOp* xorOp)
{
assert(xorOp->OperIs(GT_XOR));
assert(!optValnumCSE_phase);
GenTree* op1 = xorOp->gtGetOp1();
GenTree* op2 = xorOp->gtGetOp2();
if (op2->IsIntegralConst(0))
{
/* "x ^ 0" is "x" */
DEBUG_DESTROY_NODE(xorOp, op2);
return op1;
}
else if (op2->IsIntegralConst(-1))
{
/* "x ^ -1" is "~x" */
xorOp->ChangeOper(GT_NOT);
xorOp->gtOp2 = nullptr;
DEBUG_DESTROY_NODE(op2);
return xorOp;
}
else if (op2->IsIntegralConst(1) && op1->OperIsCompare())
{
/* "binaryVal ^ 1" is "!binaryVal" */
gtReverseCond(op1);
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(xorOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgPropagateCommaThrow: propagate a "comma throw" up the tree.
//
// "Comma throws" in the compiler represent the canonical form of an always
// throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy
// the semantic that the original expression produced some value; they are
// generated by "gtFoldExprConst" when it encounters checked arithmetic that
// will determinably overflow.
//
// In the global morphing phase, "comma throws" are "propagated" up the tree,
// in post-order, to eliminate nodes that will never execute. This method,
// called by "fgMorphSmpOp", encapsulates this optimization.
//
// Arguments:
// parent - the node currently being processed.
// commaThrow - the comma throw in question, "parent"'s operand.
// precedingSideEffects - side effects of nodes preceding "comma" in execution order.
//
// Return Value:
// If "parent" is to be replaced with a comma throw, i. e. the propagation was successful,
// the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception:
// the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not
// have to be a "comma throw", it can be "bare" throw call if the "parent" node did not
// produce any value.
//
// Notes:
// "Comma throws" are very rare.
//
GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects)
{
// Comma throw propagation does not preserve VNs, and deletes nodes.
assert(fgGlobalMorph);
assert(fgIsCommaThrow(commaThrow));
if ((commaThrow->gtFlags & GTF_COLON_COND) == 0)
{
fgRemoveRestOfBlock = true;
}
if ((precedingSideEffects & GTF_ALL_EFFECT) == 0)
{
if (parent->TypeIs(TYP_VOID))
{
// Return the throw node as the new tree.
return commaThrow->gtGetOp1();
}
// Fix up the COMMA's type if needed.
if (genActualType(parent) != genActualType(commaThrow))
{
commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent));
commaThrow->ChangeType(genActualType(parent));
}
return commaThrow;
}
return nullptr;
}
//----------------------------------------------------------------------------------------------
// fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree.
//
// Arguments:
//    ret - The return node that uses an indirection.
//
// Return Value:
//    The original op1 of the "ret" if there was no optimization, or an optimized new op1.
//
GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret)
{
assert(ret->OperIs(GT_RETURN));
assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ));
GenTreeIndir* ind = ret->gtGetOp1()->AsIndir();
GenTree* addr = ind->Addr();
if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR))
{
// If struct promotion was undone, adjust the annotations
if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr))
{
return ind;
}
// If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that
// LclVar.
// Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))).
GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar();
if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum()))
{
assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind));
unsigned indSize;
if (ind->OperIs(GT_IND))
{
indSize = genTypeSize(ind);
}
else
{
indSize = ind->AsBlk()->GetLayout()->GetSize();
}
LclVarDsc* varDsc = lvaGetDesc(lclVar);
unsigned lclVarSize;
if (!lclVar->TypeIs(TYP_STRUCT))
{
lclVarSize = genTypeSize(varDsc->TypeGet());
}
else
{
lclVarSize = varDsc->lvExactSize;
}
// TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST
// int<-SIMD16` etc.
assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister);
#if defined(TARGET_64BIT)
bool canFold = (indSize == lclVarSize);
#else // !TARGET_64BIT
// TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST
// long<->double` there.
bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES);
#endif
// TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for
// gtNewTempAssign`.
if (canFold && (genReturnBB == nullptr))
{
// Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it.
// Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken
// and enregister it.
DEBUG_DESTROY_NODE(ind);
DEBUG_DESTROY_NODE(addr);
ret->gtOp1 = lclVar;
// We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can
// get rid of it now since the GT_RETURN node should never have
// its address taken.
assert((ret->gtFlags & GTF_DONT_CSE) == 0);
lclVar->gtFlags &= ~GTF_DONT_CSE;
return lclVar;
}
else if (!varDsc->lvDoNotEnregister)
{
lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet));
}
}
}
return ind;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
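//------------------------------------------------------------------------
// fgMorphSmpOpOptional: optional postorder morphing of some SMP trees.
//
// Reorders commutative/additive chains and performs a few oper-specific
// folds (block op morphing for struct ASGs, "(val + icon) * icon" and
// "(val + icon) << icon" distribution, "val / 1" => "val", etc.).
//
// Arguments:
//    tree - the tree to morph
//
// Return Value:
//    The morphed tree.
//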
GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
{
genTreeOps oper = tree->gtOper;
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types typ = tree->TypeGet();
if (fgGlobalMorph && GenTree::OperIsCommutative(oper))
{
/* Swap the operands so that the more expensive one is 'op1' */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtOp1 = op2;
tree->gtOp2 = op1;
op2 = op1;
op1 = tree->gtOp1;
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
if (oper == op2->gtOper)
{
/* Reorder nested operators at the same precedence level to be
left-recursive. For example, change "(a+(b+c))" to the
equivalent expression "((a+b)+c)".
*/
/* Things are handled differently for floating-point operators */
if (!varTypeIsFloating(tree->TypeGet()))
{
fgMoveOpsLeft(tree);
op1 = tree->gtOp1;
op2 = tree->gtOp2;
}
}
}
#if REARRANGE_ADDS
/* Change "((x+icon)+y)" to "((x+y)+icon)"
Don't reorder floating-point operations */
if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() &&
varTypeIsIntegralOrI(typ))
{
GenTree* ad1 = op1->AsOp()->gtOp1;
GenTree* ad2 = op1->AsOp()->gtOp2;
if (!op2->OperIsConst() && ad2->OperIsConst())
{
// This takes
// + (tree)
// / \.
// / \.
// / \.
// + (op1) op2
// / \.
// / \.
// ad1 ad2
//
// and it swaps ad2 and op2.
// Don't create a byref pointer that may point outside of the ref object.
// If a GC happens, the byref won't get updated. This can happen if one
// of the int components is negative. It also requires the address generation
// be in a fully-interruptible code region.
if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet()))
{
tree->gtOp2 = ad2;
op1->AsOp()->gtOp2 = op2;
op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
op2 = tree->gtOp2;
}
}
}
#endif
/*-------------------------------------------------------------------------
* Perform optional oper-specific postorder morphing
*/
switch (oper)
{
case GT_ASG:
// Make sure we're allowed to do this.
if (optValnumCSE_phase)
{
// It is not safe to reorder/delete CSE's
break;
}
if (varTypeIsStruct(typ) && !tree->IsPhiDefn())
{
if (tree->OperIsCopyBlkOp())
{
return fgMorphCopyBlock(tree);
}
else
{
return fgMorphInitBlock(tree);
}
}
if (typ == TYP_LONG)
{
break;
}
if (op2->gtFlags & GTF_ASG)
{
break;
}
if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT))
{
break;
}
/* Special case: a cast that can be thrown away */
// TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only
// one cast and sometimes there is another one after it that gets removed by this
// code. fgMorphSmp should be improved to remove all redundant casts so this code
// can be removed.
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow())
{
var_types srct;
var_types cast;
var_types dstt;
srct = op2->AsCast()->CastOp()->TypeGet();
cast = (var_types)op2->CastToType();
dstt = op1->TypeGet();
/* Make sure these are all ints and precision is not lost */
if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT)
{
op2 = tree->gtOp2 = op2->AsCast()->CastOp();
}
}
break;
case GT_MUL:
/* Check for the case "(val + icon) * icon" */
if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD)
{
GenTree* add = op1->AsOp()->gtOp2;
if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0))
{
if (tree->gtOverflow() || op1->gtOverflow())
{
break;
}
ssize_t imul = op2->AsIntCon()->gtIconVal;
ssize_t iadd = add->AsIntCon()->gtIconVal;
/* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
oper = GT_ADD;
tree->ChangeOper(oper);
op2->AsIntCon()->SetValueTruncating(iadd * imul);
op1->ChangeOper(GT_MUL);
add->AsIntCon()->SetIconValue(imul);
}
}
break;
case GT_DIV:
/* For "val / 1", just return "val" */
if (op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(tree);
return op1;
}
break;
case GT_UDIV:
case GT_UMOD:
tree->CheckDivideByConstOptimized(this);
break;
case GT_LSH:
/* Check for the case "(val + icon) << icon" */
if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow())
{
GenTree* cns = op1->AsOp()->gtOp2;
if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
{
ssize_t ishf = op2->AsIntConCommon()->IconValue();
ssize_t iadd = cns->AsIntConCommon()->IconValue();
// printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n");
/* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */
tree->ChangeOper(GT_ADD);
// we are reusing the shift amount node here, but the type we want is that of the shift result
op2->gtType = op1->gtType;
op2->AsIntConCommon()->SetValueTruncating(iadd << ishf);
if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr &&
cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq())
{
assert(cns->AsIntCon()->gtFieldSeq->GetNext() == nullptr);
op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq;
}
op1->ChangeOper(GT_LSH);
cns->AsIntConCommon()->SetIconValue(ishf);
}
}
break;
case GT_INIT_VAL:
// Initialization values for initBlk have special semantics - their lower
// byte is used to fill the struct. However, we allow 0 as a "bare" value,
// which enables them to get a VNForZero, and be propagated.
if (op1->IsIntegralConst(0))
{
return op1;
}
break;
default:
break;
}
return tree;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree.
//
// Arguments:
// multiOp - The tree to morph
//
// Return Value:
// The fully morphed tree.
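//
// Notes:
//    Also propagates operand side effect flags and applies a few folds, e.g.
//    on xarch "XOR(x, zero-vector)" => "x" for vector XOR intrinsics.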
//
GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp)
{
gtUpdateNodeOperSideEffects(multiOp);
bool dontCseConstArguments = false;
#if defined(FEATURE_HW_INTRINSICS)
// Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments
if (multiOp->OperIs(GT_HWINTRINSIC))
{
NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM)
{
dontCseConstArguments = true;
}
#elif defined(TARGET_ARMARCH)
if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic))
{
dontCseConstArguments = true;
}
#endif
}
#endif
for (GenTree** use : multiOp->UseEdges())
{
*use = fgMorphTree(*use);
GenTree* operand = *use;
multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
if (dontCseConstArguments && operand->OperIsConst())
{
operand->SetDoNotCSE();
}
// Promoted structs after morph must be in one of two states:
// a) Fully eliminated from the IR (independent promotion) OR only be
// used by "special" nodes (e. g. LHS of ASGs for multi-reg structs).
// b) Marked as do-not-enregister (dependent promotion).
//
// So here we preserve this invariant and mark any promoted structs as do-not-enreg.
//
if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted)
{
lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum()
DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep));
}
}
#if defined(FEATURE_HW_INTRINSICS)
if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic();
switch (hw->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_SSE_Xor:
case NI_SSE2_Xor:
case NI_AVX_Xor:
case NI_AVX2_Xor:
{
// Transform XOR(X, 0) to X for vectors
GenTree* op1 = hw->Op(1);
GenTree* op2 = hw->Op(2);
if (!gtIsActiveCSE_Candidate(hw))
{
if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op1);
return op2;
}
if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
}
#endif
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARMARCH)
case NI_Vector64_Create:
#endif
{
bool hwAllArgsAreConst = true;
for (GenTree** use : multiOp->UseEdges())
{
if (!(*use)->OperIsConst())
{
hwAllArgsAreConst = false;
break;
}
}
// Avoid unexpected CSE for constant arguments for Vector_.Create
// but only if all arguments are constants.
if (hwAllArgsAreConst)
{
for (GenTree** use : multiOp->UseEdges())
{
(*use)->SetDoNotCSE();
}
}
}
break;
default:
break;
}
}
#endif // defined(FEATURE_HW_INTRINSICS)
#ifdef FEATURE_HW_INTRINSICS
if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase)
{
return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic());
}
#endif
return multiOp;
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b
// (see ECMA III 3.55 and III.3.56).
//
// Arguments:
// tree - The GT_MOD/GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// For ARM64 we don't have a remainder instruction so this transform is
// always done. For XARCH this transform is done if we know that magic
// division will be used, in that case this transform allows CSE to
// eliminate the redundant div from code like "x = a / 3; y = a % 3;".
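//
//    Sketch of the result for "a % b":
//        SUB(copyOf(a), MUL(DIV(a, b), copyOf(b)))
//    where the copies are created via "fgMakeMultiUse".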
//
GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree));
if (tree->OperGet() == GT_MOD)
{
tree->SetOper(GT_DIV);
}
else if (tree->OperGet() == GT_UMOD)
{
tree->SetOper(GT_UDIV);
}
else
{
noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
}
var_types type = tree->gtType;
GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1);
GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2);
GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue);
GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul);
// Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul".
//
sub->gtFlags |= GTF_REVERSE_OPS;
#ifdef DEBUG
sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
tree->CheckDivideByConstOptimized(this);
return sub;
}
//------------------------------------------------------------------------
// fgMorphUModToAndSub: Transform a % b into the equivalent a & (b - 1).
// '%' must be unsigned (GT_UMOD).
// 'a' and 'b' must be integers.
// 'b' must be a constant and a power of two.
//
// Arguments:
// tree - The GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// This is more optimized than calling fgMorphModToSubMulDiv.
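//
//    For example, "a % 8" (unsigned, with a constant power-of-two divisor)
//    becomes "a & 7".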
//
GenTree* Compiler::fgMorphUModToAndSub(GenTreeOp* tree)
{
JITDUMP("\nMorphing UMOD [%06u] to And/Sub\n", dspTreeID(tree));
assert(tree->OperIs(GT_UMOD));
assert(tree->gtOp2->IsIntegralConstUnsignedPow2());
const var_types type = tree->TypeGet();
const size_t cnsValue = (static_cast<size_t>(tree->gtOp2->AsIntConCommon()->IntegralValue())) - 1;
GenTree* const newTree = gtNewOperNode(GT_AND, type, tree->gtOp1, gtNewIconNode(cnsValue, type));
INDEBUG(newTree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
DEBUG_DESTROY_NODE(tree->gtOp2);
DEBUG_DESTROY_NODE(tree);
return newTree;
}
//------------------------------------------------------------------------------
// fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree.
//
// Arguments:
// oper - Operation to check
//
// Return Value:
// True if the operation can be a root of a bitwise rotation tree; false otherwise.
bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper)
{
return (oper == GT_OR) || (oper == GT_XOR);
}
//------------------------------------------------------------------------------
// fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return
//                                      an equivalent GT_ROL or GT_ROR tree; otherwise, return "nullptr".
//
// Arguments:
// tree - tree to check for a rotation pattern
//
// Return Value:
// An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise.
//
// Assumption:
// The input is a GT_OR or a GT_XOR tree.
GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
//
// Check for a rotation pattern, e.g.,
//
// OR ROL
// / \ / \.
// LSH RSZ -> x y
// / \ / \.
// x AND x AND
// / \ / \.
// y 31 ADD 31
// / \.
// NEG 32
// |
// y
// The patterns recognized:
// (x << (y & M)) op (x >>> ((-y + N) & M))
// (x >>> ((-y + N) & M)) op (x << (y & M))
//
// (x << y) op (x >>> (-y + N))
// (x >>> (-y + N)) op (x << y)
//
// (x >>> (y & M)) op (x << ((-y + N) & M))
// (x << ((-y + N) & M)) op (x >>> (y & M))
//
// (x >>> y) op (x << (-y + N))
// (x << (-y + N)) op (x >>> y)
//
// (x << c1) op (x >>> c2)
// (x >>> c1) op (x << c2)
//
// where
// c1 and c2 are const
// c1 + c2 == bitsize(x)
// N == bitsize(x)
// M is const
// M & (N - 1) == N - 1
// op is either | or ^
if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
{
// We can't do anything if the tree has assignments, calls, or volatile
// reads. Note that we allow GTF_EXCEPT side effect since any exceptions
// thrown by the original tree will be thrown by the transformed tree as well.
return nullptr;
}
genTreeOps oper = tree->OperGet();
assert(fgOperIsBitwiseRotationRoot(oper));
// Check if we have an LSH on one side of the OR and an RSZ on the other side.
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
GenTree* leftShiftTree = nullptr;
GenTree* rightShiftTree = nullptr;
if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ))
{
leftShiftTree = op1;
rightShiftTree = op2;
}
else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH))
{
leftShiftTree = op2;
rightShiftTree = op1;
}
else
{
return nullptr;
}
// Check if the trees representing the value to shift are identical.
// We already checked that there are no side effects above.
if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1()))
{
GenTree* rotatedValue = leftShiftTree->gtGetOp1();
var_types rotatedValueActualType = genActualType(rotatedValue->gtType);
ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8;
noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64));
GenTree* leftShiftIndex = leftShiftTree->gtGetOp2();
GenTree* rightShiftIndex = rightShiftTree->gtGetOp2();
// The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits
// shouldn't be masked for the transformation to be valid. If additional
// higher bits are not masked, the transformation is still valid since the result
// of MSIL shift instructions is unspecified if the shift amount is greater or equal
// than the width of the value being shifted.
ssize_t minimalMask = rotatedValueBitSize - 1;
ssize_t leftShiftMask = -1;
ssize_t rightShiftMask = -1;
if ((leftShiftIndex->OperGet() == GT_AND))
{
if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
leftShiftIndex = leftShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if ((rightShiftIndex->OperGet() == GT_AND))
{
if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
rightShiftIndex = rightShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask))
{
// The shift index is overmasked, e.g., we have
// something like (x << y & 15) or
// (x >> (32 - y) & 15) with 32 bit x.
// The transformation is not valid.
return nullptr;
}
GenTree* shiftIndexWithAdd = nullptr;
GenTree* shiftIndexWithoutAdd = nullptr;
genTreeOps rotateOp = GT_NONE;
GenTree* rotateIndex = nullptr;
if (leftShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = leftShiftIndex;
shiftIndexWithoutAdd = rightShiftIndex;
rotateOp = GT_ROR;
}
else if (rightShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = rightShiftIndex;
shiftIndexWithoutAdd = leftShiftIndex;
rotateOp = GT_ROL;
}
if (shiftIndexWithAdd != nullptr)
{
if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI())
{
if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG)
{
if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd))
{
// We found one of these patterns:
// (x << (y & M)) | (x >>> ((-y + N) & M))
// (x << y) | (x >>> (-y + N))
// (x >>> (y & M)) | (x << ((-y + N) & M))
// (x >>> y) | (x << (-y + N))
// where N == bitsize(x), M is const, and
// M & (N - 1) == N - 1
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64))
{
// TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86.
// GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need
// to add helpers for GT_ROL and GT_ROR.
return nullptr;
}
#endif
rotateIndex = shiftIndexWithoutAdd;
}
}
}
}
}
else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
{
if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
// We found this pattern:
// (x << c1) | (x >>> c2)
// where c1 and c2 are const and c1 + c2 == bitsize(x)
rotateOp = GT_ROL;
rotateIndex = leftShiftIndex;
}
}
if (rotateIndex != nullptr)
{
noway_assert(GenTree::OperIsRotate(rotateOp));
GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT;
// We can use the same tree only during global morph; reusing the tree in a later morph
// may invalidate value numbers.
if (fgGlobalMorph)
{
tree->AsOp()->gtOp1 = rotatedValue;
tree->AsOp()->gtOp2 = rotateIndex;
tree->ChangeOper(rotateOp);
unsigned childFlags = 0;
for (GenTree* op : tree->Operands())
{
childFlags |= (op->gtFlags & GTF_ALL_EFFECT);
}
// The parent's flags should be a superset of its operands' flags
noway_assert((inputTreeEffects & childFlags) == childFlags);
}
else
{
tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex);
noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT));
}
return tree;
}
}
return nullptr;
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------------
// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
//
// Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap
// operands if the first one is a constant and the second one is not, even for trees which
// end up not being eligible for long multiplication.
//
// Arguments:
// mul - GT_MUL tree to check for a long multiplication opportunity
//
// Return Value:
// The original tree, with operands possibly swapped, if it is not eligible for long multiplication.
// Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
//
GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(mul->TypeIs(TYP_LONG));
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// "IsValidLongMul" and decomposition do not handle constant op1.
if (op1->IsIntegralConst())
{
std::swap(op1, op2);
mul->gtOp1 = op1;
mul->gtOp2 = op2;
}
if (!mul->IsValidLongMul())
{
return mul;
}
// MUL_LONG needs to do the work the casts would have done.
mul->ClearUnsigned();
if (op1->IsUnsigned())
{
mul->SetUnsigned();
}
// "IsValidLongMul" returned "true", so this GT_MUL cannot overflow.
mul->ClearOverflow();
mul->Set64RsltMul();
return fgMorphLongMul(mul);
}
//------------------------------------------------------------------------------
// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
//
// Morphs *only* the operands of casts that compose the long mul to
// avoid them being folded away.
//
// Arguments:
// mul - GT_MUL tree to morph operands of
//
// Return Value:
// The original tree, with operands morphed and flags propagated.
//
GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
{
INDEBUG(mul->DebugCheckLongMul());
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp());
op1->SetAllEffectsFlags(op1->AsCast()->CastOp());
if (op2->OperIs(GT_CAST))
{
op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp());
op2->SetAllEffectsFlags(op2->AsCast()->CastOp());
}
mul->SetAllEffectsFlags(op1, op2);
op1->SetDoNotCSE();
op2->SetDoNotCSE();
return mul;
}
#endif // !defined(TARGET_64BIT)
/*****************************************************************************
*
* Transform the given tree for code generation and return an equivalent tree.
*/
GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac)
{
assert(tree);
#ifdef DEBUG
if (verbose)
{
if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID)
{
noway_assert(!"JitBreakMorphTree hit");
}
}
#endif
#ifdef DEBUG
int thisMorphNum = 0;
if (verbose && treesBeforeAfterMorph)
{
thisMorphNum = morphNum++;
printf("\nfgMorphTree (before %d):\n", thisMorphNum);
gtDispTree(tree);
}
#endif
if (fgGlobalMorph)
{
// Apply any rewrites for implicit byref arguments before morphing the
// tree.
if (fgMorphImplicitByRefArgs(tree))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum);
gtDispTree(tree);
}
#endif
}
}
/*-------------------------------------------------------------------------
* fgMorphTree() can potentially replace a tree with another, and the
* caller has to store the return value correctly.
* Turn this on to always make a copy of "tree" here to shake out
* hidden/unupdated references.
*/
#ifdef DEBUG
if (compStressCompile(STRESS_GENERIC_CHECK, 0))
{
GenTree* copy;
if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(GT_ADD, TYP_INT);
}
else
{
copy = new (this, GT_CALL) GenTreeCall(TYP_INT);
}
copy->ReplaceWith(tree, this);
#if defined(LATE_DISASM)
// GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
#endif
DEBUG_DESTROY_NODE(tree);
tree = copy;
}
#endif // DEBUG
if (fgGlobalMorph)
{
/* Ensure that we haven't morphed this node already */
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
/* Before morphing the tree, we try to propagate any active assertions */
if (optLocalAssertionProp)
{
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
GenTree* newTree = tree;
while (newTree != nullptr)
{
tree = newTree;
/* newTree is non-Null if we propagated an assertion */
newTree = optAssertionProp(apFull, tree, nullptr, nullptr);
}
assert(tree != nullptr);
}
}
PREFAST_ASSUME(tree != nullptr);
}
/* Save the original un-morphed tree for fgMorphTreeDone */
GenTree* oldTree = tree;
/* Figure out what kind of a node we have */
unsigned kind = tree->OperKind();
/* Is this a constant node? */
if (tree->OperIsConst())
{
tree = fgMorphConst(tree);
goto DONE;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
tree = fgMorphLeaf(tree);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
tree = fgMorphSmpOp(tree, mac);
goto DONE;
}
/* See what kind of a special operator we have here */
switch (tree->OperGet())
{
case GT_CALL:
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree = fgMorphCall(tree->AsCall());
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
tree = fgMorphMultiOp(tree->AsMultiOp());
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]);
}
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT;
}
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_ARR_OFFSET:
tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset);
tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex);
tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj);
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_PHI:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT;
}
break;
case GT_FIELD_LIST:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
break;
case GT_CMPXCHG:
tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation);
tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue);
tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand);
tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL);
tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT;
break;
case GT_STORE_DYN_BLK:
tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk());
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
noway_assert(!"unexpected operator");
}
DONE:
fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum));
return tree;
}
//------------------------------------------------------------------------
// fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree))
{
/* All dependent assertions are killed here */
ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum));
if (killed)
{
AssertionIndex index = optAssertionCount;
while (killed && (index > 0))
{
if (BitVecOps::IsMember(apTraits, killed, index - 1))
{
#ifdef DEBUG
AssertionDsc* curAssertion = optGetAssertion(index);
noway_assert((curAssertion->op1.lcl.lclNum == lclNum) ||
((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum)));
if (verbose)
{
printf("\nThe assignment ");
printTreeID(tree);
printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum);
optPrintAssertion(curAssertion);
}
#endif
// Remove this bit from the killed mask
BitVecOps::RemoveElemD(apTraits, killed, index - 1);
optAssertionRemove(index);
}
index--;
}
// killed mask should now be zero
noway_assert(BitVecOps::IsEmpty(apTraits, killed));
}
}
//------------------------------------------------------------------------
// fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum.
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
// Notes:
// For structs and struct fields, it will invalidate the children and parent
// respectively.
// Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar.
//
void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree))
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
noway_assert(varTypeIsStruct(varDsc));
// Kill the field locals.
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
fgKillDependentAssertionsSingle(i DEBUGARG(tree));
}
// Kill the struct local itself.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
else if (varDsc->lvIsStructField)
{
// Kill the field local.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
// Kill the parent struct.
fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree));
}
else
{
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
}
/*****************************************************************************
*
* This function is called to complete the morphing of a tree node
* It should only be called once for each node.
* If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated,
* to enforce the invariant that each node is only morphed once.
* If local assertion prop is enabled the result tree may be replaced
* by an equivalent tree.
*
*/
void Compiler::fgMorphTreeDone(GenTree* tree,
GenTree* oldTree /* == NULL */
DEBUGARG(int morphNum))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (after %d):\n", morphNum);
gtDispTree(tree);
printf(""); // in our logic this causes a flush
}
#endif
if (!fgGlobalMorph)
{
return;
}
if ((oldTree != nullptr) && (oldTree != tree))
{
/* Ensure that we have morphed this node */
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!");
#ifdef DEBUG
TransferTestDataToNode(oldTree, tree);
#endif
}
else
{
// Ensure that we haven't morphed this node already
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
}
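// Constant nodes neither kill nor generate assertions, so skip the assertion
// bookkeeping below for them.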
if (tree->OperIsConst())
{
goto DONE;
}
if (!optLocalAssertionProp)
{
goto DONE;
}
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
/* Is this an assignment to a local variable */
GenTreeLclVarCommon* lclVarTree = nullptr;
// The check below will miss LIR-style assignments.
//
// But we shouldn't be running local assertion prop on these,
// as local prop gets disabled when we run global prop.
assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
// DefinesLocal can return true for some BLK op uses, so
// check what gets assigned only when we're at an assignment.
if (tree->OperIsSsaDef() && tree->DefinesLocal(this, &lclVarTree))
{
unsigned lclNum = lclVarTree->GetLclNum();
noway_assert(lclNum < lvaCount);
fgKillDependentAssertions(lclNum DEBUGARG(tree));
}
}
/* If this tree makes a new assertion - make it available */
optAssertionGen(tree);
DONE:;
#ifdef DEBUG
/* Mark this node as being morphed */
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
//------------------------------------------------------------------------
// fgFoldConditional: try to fold conditionals and optimize BBJ_COND or
// BBJ_SWITCH blocks.
//
// Arguments:
// block - block to examine
//
// Returns:
// FoldResult indicating what changes were made, if any
//
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
FoldResult result = FoldResult::FOLD_DID_NOTHING;
// We don't want to make any code unreachable
//
if (opts.OptimizationDisabled())
{
return result;
}
if (block->bbJumpKind == BBJ_COND)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the jump entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
// block is a BBJ_COND that we are folding the conditional for.
// bTaken is the path that will always be taken from block.
// bNotTaken is the path that will never be taken from block.
//
BasicBlock* bTaken;
BasicBlock* bNotTaken;
if (cond->AsIntCon()->gtIconVal != 0)
{
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
bTaken = block->bbJumpDest;
bNotTaken = block->bbNext;
}
else
{
/* Unmark the loop if we are removing a backwards branch */
/* dest block must also be marked as a loop head and */
/* We must be able to reach the backedge block */
if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
}
/* JTRUE 0 - transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
bTaken = block->bbNext;
bNotTaken = block->bbJumpDest;
}
if (fgHaveValidEdgeWeights)
{
// We are removing an edge from block to bNotTaken
// and we have already computed the edge weights, so
// we will try to adjust some of the weights
//
flowList* edgeTaken = fgGetPredForBlock(bTaken, block);
BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
// We examine the taken edge (block -> bTaken)
// if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
// else if bTaken has valid profile weight and block does not we try to adjust block's weight
// We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken
//
if (block->hasProfileWeight())
{
// The edge weights for (block -> bTaken) are 100% of block's weight
edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken);
if (!bTaken->hasProfileWeight())
{
if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight))
{
// Update the weight of bTaken
bTaken->inheritWeight(block);
bUpdated = bTaken;
}
}
}
else if (bTaken->hasProfileWeight())
{
if (bTaken->countOfInEdges() == 1)
{
// There is only one in edge to bTaken
edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken);
// Update the weight of block
block->inheritWeight(bTaken);
bUpdated = block;
}
}
if (bUpdated != nullptr)
{
weight_t newMinWeight;
weight_t newMaxWeight;
flowList* edge;
// Now fix the weights of the edges out of 'bUpdated'
switch (bUpdated->bbJumpKind)
{
case BBJ_NONE:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
break;
case BBJ_COND:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
FALLTHROUGH;
case BBJ_ALWAYS:
edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
break;
default:
// We don't handle BBJ_SWITCH
break;
}
}
}
/* modify the flow graph */
/* Remove 'block' from the predecessor list of 'bNotTaken' */
fgRemoveRefPred(bNotTaken, block);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
/* if the block was a loop condition we may have to modify
* the loop table */
for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++)
{
/* Some loops may have been already removed by
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
{
continue;
}
/* We are only interested in the loop bottom */
if (optLoopTable[loopNum].lpBottom == block)
{
if (cond->AsIntCon()->gtIconVal == 0)
{
/* This was a bogus loop (condition always false)
* Remove the loop from the table */
optMarkLoopRemoved(loopNum);
optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop"));
#ifdef DEBUG
if (verbose)
{
printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum,
optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum);
}
#endif
}
}
}
}
}
else if (block->bbJumpKind == BBJ_SWITCH)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the switch entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
/* modify the flow graph */
/* Find the actual jump target */
unsigned switchVal;
switchVal = (unsigned)cond->AsIntCon()->gtIconVal;
unsigned jumpCnt;
jumpCnt = block->bbJumpSwt->bbsCount;
BasicBlock** jumpTab;
jumpTab = block->bbJumpSwt->bbsDstTab;
bool foundVal;
foundVal = false;
for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
{
BasicBlock* curJump = *jumpTab;
assert(curJump->countOfInEdges() > 0);
// If val matches switchVal or we are at the last entry and
// we never found the switch value then set the new jump dest
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
if (curJump != block->bbNext)
{
/* transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = curJump;
}
else
{
/* transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
}
foundVal = true;
}
else
{
/* Remove 'block' from the predecessor list of 'curJump' */
fgRemoveRefPred(curJump, block);
}
}
assert(foundVal);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphBlockStmt: morph a single statement in a block.
//
// Arguments:
// block - block containing the statement
// stmt - statement to morph
// msg - string to identify caller in a dump
//
// Returns:
// true if 'stmt' was removed from the block.
// false if 'stmt' is still in the block (even if other statements were removed).
//
// Notes:
// Can be called anytime, unlike fgMorphStmts() which should only be called once.
//
bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg))
{
assert(block != nullptr);
assert(stmt != nullptr);
// Reset some ambient state
fgRemoveRestOfBlock = false;
compCurBB = block;
compCurStmt = stmt;
GenTree* morph = fgMorphTree(stmt->GetRootNode());
// Bug 1106830 - During the CSE phase we can't just remove
// morph->AsOp()->gtOp2 as it could contain CSE expressions.
// This leads to a noway_assert in OptCSE.cpp when
// searching for the removed CSE ref. (using gtFindLink)
//
if (!optValnumCSE_phase)
{
// Check for morph as a GT_COMMA with an unconditional throw
if (fgIsCommaThrow(morph, true))
{
#ifdef DEBUG
if (verbose)
{
printf("Folding a top-level fgIsCommaThrow stmt\n");
printf("Removing op2 as unreachable:\n");
gtDispTree(morph->AsOp()->gtOp2);
printf("\n");
}
#endif
// Use the call as the new stmt
morph = morph->AsOp()->gtOp1;
noway_assert(morph->gtOper == GT_CALL);
}
// we can get a throw as a statement root
if (fgIsThrow(morph))
{
#ifdef DEBUG
if (verbose)
{
printf("We have a top-level fgIsThrow stmt\n");
printf("Removing the rest of block as unreachable:\n");
}
#endif
noway_assert((morph->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
}
stmt->SetRootNode(morph);
// Can the entire tree be removed?
bool removedStmt = false;
// Defer removing statements during CSE so we don't inadvertently remove any CSE defs.
if (!optValnumCSE_phase)
{
removedStmt = fgCheckRemoveStmt(block, stmt);
}
// Or, if this is the last statement of the block, did folding a conditional branch remove it?
if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock)
{
FoldResult const fr = fgFoldConditional(block);
removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT);
}
if (!removedStmt)
{
// Have to re-do the evaluation order since for example some later code does not expect constants as op1
gtSetStmtInfo(stmt);
// Have to re-link the nodes for this statement
fgSetStmtSeq(stmt);
}
#ifdef DEBUG
if (verbose)
{
printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed"));
gtDispTree(morph);
printf("\n");
}
#endif
if (fgRemoveRestOfBlock)
{
// Remove the rest of the stmts in the block
for (Statement* removeStmt : StatementList(stmt->GetNextStmt()))
{
fgRemoveStmt(block, removeStmt);
}
// The rest of block has been removed and we will always throw an exception.
//
// For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE.
// We should not convert it to a ThrowBB.
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0))
{
// Convert block to a throw bb
fgConvertBBToThrowBB(block);
}
#ifdef DEBUG
if (verbose)
{
printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum);
}
#endif
fgRemoveRestOfBlock = false;
}
return removedStmt;
}
/*****************************************************************************
*
* Morph the statements of the given block.
* This function should be called just once for a block. Use fgMorphBlockStmt()
* for reentrant calls.
*/
void Compiler::fgMorphStmts(BasicBlock* block)
{
fgRemoveRestOfBlock = false;
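// Track which outgoing arg temps are in use within the current statement, so
// they can be reused by subsequent statements.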
fgCurrentlyInUseArgTemps = hashBv::Create(this);
for (Statement* const stmt : block->Statements())
{
if (fgRemoveRestOfBlock)
{
fgRemoveStmt(block, stmt);
continue;
}
#ifdef FEATURE_SIMD
if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT &&
stmt->GetRootNode()->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
fgMorphStmt = stmt;
compCurStmt = stmt;
GenTree* oldTree = stmt->GetRootNode();
#ifdef DEBUG
unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0);
if (verbose)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID());
gtDispTree(oldTree);
}
#endif
/* Morph this statement tree */
GenTree* morphedTree = fgMorphTree(oldTree);
// mark any outgoing arg temps as free so we can reuse them in the next statement.
fgCurrentlyInUseArgTemps->ZeroAll();
// Has fgMorphStmt been sneakily changed ?
if ((stmt->GetRootNode() != oldTree) || (block != compCurBB))
{
if (stmt->GetRootNode() != oldTree)
{
/* This must be a tailcall. Ignore 'morphedTree' and carry on with
the tail-call node */
morphedTree = stmt->GetRootNode();
}
else
{
/* This must be a tailcall that caused a GCPoll to get
injected. We haven't actually morphed the call yet
but the flag still got set, clear it here... */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
}
noway_assert(compTailCallUsed);
noway_assert(morphedTree->gtOper == GT_CALL);
GenTreeCall* call = morphedTree->AsCall();
// Could be
// - a fast call made as jmp in which case block will be ending with
// BBJ_RETURN (as we need epilog) and marked as containing a jmp.
// - a tailcall dispatched via JIT helper, on x86, in which case
// block will be ending with BBJ_THROW.
// - a tail call dispatched via runtime help (IL stubs), in which
// case there will not be any tailcall and the block will be ending
// with BBJ_RETURN (as normal control flow)
noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) &&
((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) ||
(call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
(!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN)));
}
#ifdef DEBUG
if (compStressCompile(STRESS_CLONE_EXPR, 30))
{
// Clone all the trees to stress gtCloneExpr()
if (verbose)
{
printf("\nfgMorphTree (stressClone from):\n");
gtDispTree(morphedTree);
}
morphedTree = gtCloneExpr(morphedTree);
noway_assert(morphedTree != nullptr);
if (verbose)
{
printf("\nfgMorphTree (stressClone to):\n");
gtDispTree(morphedTree);
}
}
/* If the hash value changed, we modified the tree during morphing */
if (verbose)
{
unsigned newHash = gtHashValue(morphedTree);
if (newHash != oldHash)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID());
gtDispTree(morphedTree);
}
}
#endif
/* Check for morphedTree as a GT_COMMA with an unconditional throw */
if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true))
{
/* Use the call as the new stmt */
morphedTree = morphedTree->AsOp()->gtOp1;
noway_assert(morphedTree->gtOper == GT_CALL);
noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
stmt->SetRootNode(morphedTree);
if (fgRemoveRestOfBlock)
{
continue;
}
/* Has the statement been optimized away */
if (fgCheckRemoveStmt(block, stmt))
{
continue;
}
/* Check if this block ends with a conditional branch that can be folded */
if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING)
{
continue;
}
if (ehBlockHasExnFlowDsc(block))
{
continue;
}
}
if (fgRemoveRestOfBlock)
{
if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH))
{
Statement* first = block->firstStmt();
noway_assert(first);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr);
GenTree* last = lastStmt->GetRootNode();
if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) ||
((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH)))
{
GenTree* op1 = last->AsOp()->gtOp1;
if (op1->OperIsCompare())
{
/* Unmark the comparison node with GTF_RELOP_JMP_USED */
op1->gtFlags &= ~GTF_RELOP_JMP_USED;
}
lastStmt->SetRootNode(fgMorphTree(op1));
}
}
/* Mark block as a BBJ_THROW block */
fgConvertBBToThrowBB(block);
}
#if FEATURE_FASTTAILCALL
GenTree* recursiveTailCall = nullptr;
if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall))
{
fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall());
}
#endif
// Reset this back so that it doesn't leak out impacting other blocks
fgRemoveRestOfBlock = false;
}
/*****************************************************************************
*
* Morph the blocks of the method.
* Returns true if the basic block list is modified.
* This function should be called just once.
*/
void Compiler::fgMorphBlocks()
{
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgMorphBlocks()\n");
}
#endif
/* Since fgMorphTree can be called after various optimizations to re-arrange
* the nodes we need a global flag to signal if we are during the one-pass
* global morphing */
fgGlobalMorph = true;
//
// Local assertion prop is enabled if we are optimized
//
optLocalAssertionProp = opts.OptimizationEnabled();
if (optLocalAssertionProp)
{
//
// Initialize for local assertion prop
//
optAssertionInit(true);
}
if (!compEnregLocals())
{
// Morph is checking if lvDoNotEnregister is already set for some optimizations.
// If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`)
// then we already know that we won't enregister any locals and it is better to set
// this flag before we start reading it.
// The main reason why this flag is not set is that we are running in minOpts.
lvSetMinOptsDoNotEnreg();
}
/*-------------------------------------------------------------------------
* Process all basic blocks in the function
*/
BasicBlock* block = fgFirstBB;
noway_assert(block);
do
{
#ifdef DEBUG
if (verbose)
{
printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName);
}
#endif
if (optLocalAssertionProp)
{
//
// Clear out any currently recorded assertion candidates
// before processing each basic block,
// also we must handle QMARK-COLON specially
//
optAssertionReset(0);
}
// Make the current basic block address available globally.
compCurBB = block;
// Process all statement trees in the basic block.
fgMorphStmts(block);
// Do we need to merge the result of this block into a single return block?
if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0))
{
if ((genReturnBB != nullptr) && (genReturnBB != block))
{
fgMergeBlockReturn(block);
}
}
block = block->bbNext;
} while (block != nullptr);
// We are done with the global morphing phase
fgGlobalMorph = false;
compCurBB = nullptr;
// Under OSR, we no longer need to specially protect the original method entry
//
if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED))
{
JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum);
assert(fgEntryBB->bbRefs > 0);
fgEntryBB->bbRefs--;
// We don't need to remember this block anymore.
fgEntryBB = nullptr;
}
#ifdef DEBUG
if (verboseTrees)
{
fgDispBasicBlocks(true);
}
#endif
}
//------------------------------------------------------------------------
// fgMergeBlockReturn: assign the block return value (if any) into the single return temp
// and branch to the single return block.
//
// Arguments:
// block - the block to process.
//
// Notes:
// A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN.
// For example a method returning void could have an empty block with jump kind BBJ_RETURN.
// Such blocks do materialize as part of in-lining.
//
// A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN.
// It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC.
// For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal
// is BAD_VAR_NUM.
//
void Compiler::fgMergeBlockReturn(BasicBlock* block)
{
assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0));
assert((genReturnBB != nullptr) && (genReturnBB != block));
// TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN.
Statement* lastStmt = block->lastStmt();
GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr;
if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0))
{
// This return was generated during epilog merging, so leave it alone
}
else
{
// We'll jump to the genReturnBB.
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_X86)
if (info.compFlags & CORINFO_FLG_SYNCH)
{
fgConvertSyncReturnToLeave(block);
}
else
#endif // !TARGET_X86
{
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = genReturnBB;
fgAddRefPred(genReturnBB, block);
fgReturnCount--;
}
if (genReturnLocal != BAD_VAR_NUM)
{
// replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal.
// Method must be returning a value other than TYP_VOID.
noway_assert(compMethodHasRetVal());
// This block must be ending with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
noway_assert(ret != nullptr);
// GT_RETURN must have non-null operand as the method is returning the value assigned to
// genReturnLocal
noway_assert(ret->OperGet() == GT_RETURN);
noway_assert(ret->gtGetOp1() != nullptr);
Statement* pAfterStatement = lastStmt;
const DebugInfo& di = lastStmt->GetDebugInfo();
GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block);
if (tree->OperIsCopyBlkOp())
{
tree = fgMorphCopyBlock(tree);
}
else if (tree->OperIsInitBlkOp())
{
tree = fgMorphInitBlock(tree);
}
if (pAfterStatement == lastStmt)
{
lastStmt->SetRootNode(tree);
}
else
{
// gtNewTempAssign inserted additional statements after last
fgRemoveStmt(block, lastStmt);
Statement* newStmt = gtNewStmt(tree, di);
fgInsertStmtAfter(block, pAfterStatement, newStmt);
lastStmt = newStmt;
}
}
else if (ret != nullptr && ret->OperGet() == GT_RETURN)
{
// This block ends with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
// Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn
// block
noway_assert(ret->TypeGet() == TYP_VOID);
noway_assert(ret->gtGetOp1() == nullptr);
fgRemoveStmt(block, lastStmt);
}
JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum);
DISPBLOCK(block);
if (block->hasProfileWeight())
{
weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT;
weight_t const newWeight = oldWeight + block->bbWeight;
JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight,
block->bbNum, genReturnBB->bbNum);
genReturnBB->setBBProfileWeight(newWeight);
DISPBLOCK(genReturnBB);
}
}
}
/*****************************************************************************
*
* Make some decisions about the kind of code to generate.
*/
void Compiler::fgSetOptions()
{
#ifdef DEBUG
/* Should we force fully interruptible code ? */
if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30))
{
noway_assert(!codeGen->isGCTypeFixed());
SetInterruptible(true);
}
#endif
if (opts.compDbgCode)
{
assert(!codeGen->isGCTypeFixed());
SetInterruptible(true); // debugging is easier this way ...
}
/* Assume we won't need an explicit stack frame if this is allowed */
if (compLocallocUsed)
{
codeGen->setFramePointerRequired(true);
}
#ifdef TARGET_X86
if (compTailCallUsed)
codeGen->setFramePointerRequired(true);
#endif // TARGET_X86
if (!opts.genFPopt)
{
codeGen->setFramePointerRequired(true);
}
// Assert that the EH table has been initialized by now. Note that
// compHndBBtabAllocCount never decreases; it is a high-water mark
// of table allocation. In contrast, compHndBBtabCount does shrink
// if we delete a dead EH region, and if it shrinks to zero, the
// table pointer compHndBBtab is unreliable.
assert(compHndBBtabAllocCount >= info.compXcptnsCount);
#ifdef TARGET_X86
// Note: this case, and the !X86 case below, should both use the
// !X86 path. This would require a few more changes for X86 to use
// compHndBBtabCount (the current number of EH clauses) instead of
// info.compXcptnsCount (the number of EH clauses in IL), such as
// in ehNeedsShadowSPslots(). This is because sometimes the IL has
// an EH clause that we delete as statically dead code before we
// get here, leaving no EH clauses left, and thus no requirement
// to use a frame pointer because of EH. But until all the code uses
// the same test, leave info.compXcptnsCount here.
if (info.compXcptnsCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#else // !TARGET_X86
if (compHndBBtabCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#endif // TARGET_X86
#ifdef UNIX_X86_ABI
if (info.compXcptnsCount > 0)
{
assert(!codeGen->isGCTypeFixed());
// Enforce fully interruptible codegen for funclet unwinding
SetInterruptible(true);
}
#endif // UNIX_X86_ABI
if (compMethodRequiresPInvokeFrame())
{
codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
}
if (info.compPublishStubParam)
{
codeGen->setFramePointerRequiredGCInfo(true);
}
if (compIsProfilerHookNeeded())
{
codeGen->setFramePointerRequired(true);
}
if (info.compIsVarArgs)
{
// Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
codeGen->setFramePointerRequiredGCInfo(true);
}
if (lvaReportParamTypeArg())
{
codeGen->setFramePointerRequiredGCInfo(true);
}
// printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}
/*****************************************************************************/
GenTree* Compiler::fgInitThisClass()
{
noway_assert(!compIsForInlining());
CORINFO_LOOKUP_KIND kind;
info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
if (!kind.needsRuntimeLookup)
{
return fgGetSharedCCtor(info.compClassHnd);
}
else
{
#ifdef FEATURE_READYTORUN
// Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
{
CORINFO_RESOLVED_TOKEN resolvedToken;
memset(&resolvedToken, 0, sizeof(resolvedToken));
// We are in a shared method body, but maybe we don't need a runtime lookup after all.
// This covers the case of a generic method on a non-generic type.
if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
{
resolvedToken.hClass = info.compClassHnd;
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
}
// We need a runtime lookup.
GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
// CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
// base of the class that owns the method being compiled". If we're in this method, it means we're not
// inlining and there's no ambiguity.
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
gtNewCallArgs(ctxTree), &kind);
}
#endif
// Collectible types require that for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach; we could detect some cases, particularly when the
// context parameter is 'this', where we don't need the eager reporting logic.)
lvaGenericsContextInUse = true;
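// Build the class-initialization helper call from whichever lookup kind supplies the generic context.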
switch (kind.runtimeLookupKind)
{
case CORINFO_LOOKUP_THISOBJ:
{
// This code takes a this pointer; but we need to pass the static method desc to get the right point in
// the hierarchy
GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
// Vtable pointer of this object
vtTree = gtNewMethodTableLookup(vtTree);
GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd);
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd));
}
case CORINFO_LOOKUP_CLASSPARAM:
{
GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree));
}
case CORINFO_LOOKUP_METHODPARAM:
{
GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
methHndTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID,
gtNewCallArgs(gtNewIconNode(0), methHndTree));
}
default:
noway_assert(!"Unknown LOOKUP_KIND");
UNREACHABLE();
}
}
}
#ifdef DEBUG
/*****************************************************************************
*
* Tree walk callback to make sure no GT_QMARK nodes are present in the tree,
* except for the allowed ? 1 : 0; pattern.
*/
Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data)
{
if ((*tree)->OperGet() == GT_QMARK)
{
fgCheckQmarkAllowedForm(*tree);
}
return WALK_CONTINUE;
}
void Compiler::fgCheckQmarkAllowedForm(GenTree* tree)
{
assert(tree->OperGet() == GT_QMARK);
assert(!"Qmarks beyond morph disallowed.");
}
/*****************************************************************************
*
* Verify that the importer has created GT_QMARK nodes in a way we can
* process them. The following is allowed:
*
* 1. A top level qmark. Top level qmark is of the form:
* a) (bool) ? (void) : (void) OR
* b) V0N = (bool) ? (type) : (type)
*
* 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child
* of either op1 of colon or op2 of colon but not a child of any other
* operator.
*/
void Compiler::fgPreExpandQmarkChecks(GenTree* expr)
{
GenTree* topQmark = fgGetTopLevelQmark(expr);
// If the top level Qmark is null, then scan the tree to make sure
// there are no qmarks within it.
if (topQmark == nullptr)
{
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
else
{
// We could probably expand the cond node also, but don't think the extra effort is necessary,
// so let's just assert the cond node of a top level qmark doesn't have further top level qmarks.
fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2);
}
}
#endif // DEBUG
/*****************************************************************************
*
* Get the top level GT_QMARK node in a given "expr", return NULL if such a
* node is not present. If the top level GT_QMARK node is assigned to a
* GT_LCL_VAR, then return the lcl node in ppDst.
*
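* For example, both a bare 'QMARK(cond, COLON(t, f))' statement and an
* 'ASG(LCL_VAR, QMARK(cond, COLON(t, f)))' statement are recognized; in the
* latter case the destination LCL_VAR is returned via ppDst.
*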
*/
GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
if (ppDst != nullptr)
{
*ppDst = nullptr;
}
GenTree* topQmark = nullptr;
if (expr->gtOper == GT_QMARK)
{
topQmark = expr;
}
else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
topQmark = expr->AsOp()->gtOp2;
if (ppDst != nullptr)
{
*ppDst = expr->AsOp()->gtOp1;
}
}
return topQmark;
}
/*********************************************************************************
*
* For a castclass helper call,
* Importer creates the following tree:
* tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
*
* This method splits the qmark expression created by the importer into the
* following blocks: (block, asg, cond1, cond2, helper, remainder)
* Notice that op1 is the result for both the conditions. So we coalesce these
* assignments into a single block instead of two blocks resulting a nested diamond.
*
* +---------->-----------+
* | | |
* ^ ^ v
* | | |
* block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
*
* We expect to achieve the following codegen:
* mov rsi, rdx tmp = op1 // asgBlock
* test rsi, rsi goto skip if tmp == null ? // cond1Block
* je SKIP
* mov rcx, 0x76543210 cns = op2 // cond2Block
* cmp qword ptr [rsi], rcx goto skip if *tmp == op2
* je SKIP
* call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock
* mov rsi, rax
* SKIP: // remainderBlock
* tmp has the result.
*
*/
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
GenTree* expr = stmt->GetRootNode();
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
noway_assert(dst != nullptr);
assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);
// Get cond, true, false exprs for the qmark.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
// Get cond, true, false exprs for the nested qmark.
GenTree* nestedQmark = falseExpr;
GenTree* cond2Expr;
GenTree* true2Expr;
GenTree* false2Expr;
if (nestedQmark->gtOper == GT_QMARK)
{
cond2Expr = nestedQmark->gtGetOp1();
true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
}
else
{
// This is a rare case that arises when we are doing minopts and encounter isinst of null.
// gtFoldExpr was still able to optimize away part of the tree (but not all).
// That means it does not match our pattern.
// Rather than write code to handle this case, just fake up some nodes to make it match the common
// case. Synthesize a comparison that is always true, and for the result-on-true, use the
// entire subtree we expected to be the nested question op.
cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
true2Expr = nestedQmark;
false2Expr = gtNewIconNode(0, TYP_I_IMPL);
}
assert(false2Expr->OperGet() == trueExpr->OperGet());
// Create the chain of blocks. See method header comment.
// The order of blocks after this is the following:
// block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
remainderBlock->bbFlags |= propagateFlags;
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
helperBlock->bbFlags &= ~BBF_INTERNAL;
cond2Block->bbFlags &= ~BBF_INTERNAL;
cond1Block->bbFlags &= ~BBF_INTERNAL;
asgBlock->bbFlags &= ~BBF_INTERNAL;
helperBlock->bbFlags |= BBF_IMPORTED;
cond2Block->bbFlags |= BBF_IMPORTED;
cond1Block->bbFlags |= BBF_IMPORTED;
asgBlock->bbFlags |= BBF_IMPORTED;
}
// Chain the flow correctly.
fgAddRefPred(asgBlock, block);
fgAddRefPred(cond1Block, asgBlock);
fgAddRefPred(cond2Block, cond1Block);
fgAddRefPred(helperBlock, cond2Block);
fgAddRefPred(remainderBlock, helperBlock);
fgAddRefPred(remainderBlock, cond1Block);
fgAddRefPred(remainderBlock, cond2Block);
cond1Block->bbJumpDest = remainderBlock;
cond2Block->bbJumpDest = remainderBlock;
// Set the weights; some are guesses.
asgBlock->inheritWeight(block);
cond1Block->inheritWeight(block);
cond2Block->inheritWeightPercentage(cond1Block, 50);
helperBlock->inheritWeightPercentage(cond2Block, 50);
// Append cond1 as JTRUE to cond1Block
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr);
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond1Block, jmpStmt);
// Append cond2 as JTRUE to cond2Block
jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr);
jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond2Block, jmpStmt);
// AsgBlock should get tmp = op1 assignment.
trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(asgBlock, trueStmt);
// Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper.
gtReverseCond(cond2Expr);
GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr);
Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(helperBlock, helperStmt);
// Finally remove the nested qmark stmt.
fgRemoveStmt(block, stmt);
if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN))
{
fgConvertBBToThrowBB(helperBlock);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand a statement with a top level qmark node. There are three cases, based
* on whether the qmark has both "true" and "false" arms, or just one of them.
*
* S0;
* C ? T : F;
* S1;
*
* Generates ===>
*
* bbj_always
* +---->------+
* false | |
* S0 -->-- ~C -->-- T F -->-- S1
* | |
* +--->--------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? T : NOP;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- ~C -->-- T -->-- S1
* | |
* +-->-------------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? NOP : F;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- C -->-- F -->-- S1
* | |
* +-->------------+
* bbj_cond(true)
*
* If the qmark assigns to a variable, then create tmps for "then"
* and "else" results and assign the temp to the variable as a writeback step.
*/
void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
{
GenTree* expr = stmt->GetRootNode();
// Retrieve the Qmark node to be expanded.
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
if (qmark == nullptr)
{
return;
}
if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF)
{
fgExpandQmarkForCastInstOf(block, stmt);
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
// Retrieve the operands.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
assert(!varTypeIsFloating(condExpr->TypeGet()));
bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP);
bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!
// Create remainder, cond and "else" blocks. After this, the blocks are in this order:
// block ... condBlock ... elseBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
condBlock->bbFlags &= ~BBF_INTERNAL;
elseBlock->bbFlags &= ~BBF_INTERNAL;
condBlock->bbFlags |= BBF_IMPORTED;
elseBlock->bbFlags |= BBF_IMPORTED;
}
remainderBlock->bbFlags |= propagateFlags;
condBlock->inheritWeight(block);
fgAddRefPred(condBlock, block);
fgAddRefPred(elseBlock, condBlock);
fgAddRefPred(remainderBlock, elseBlock);
BasicBlock* thenBlock = nullptr;
if (hasTrueExpr && hasFalseExpr)
{
// bbj_always
// +---->------+
// false | |
// S0 -->-- ~C -->-- T F -->-- S1
// | |
// +--->--------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = elseBlock;
thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
thenBlock->bbJumpDest = remainderBlock;
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
thenBlock->bbFlags &= ~BBF_INTERNAL;
thenBlock->bbFlags |= BBF_IMPORTED;
}
fgAddRefPred(thenBlock, condBlock);
fgAddRefPred(remainderBlock, thenBlock);
thenBlock->inheritWeightPercentage(condBlock, 50);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasTrueExpr)
{
// false
// S0 -->-- ~C -->-- T -->-- S1
// | |
// +-->-------------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
// Since we have no false expr, use the one we'd already created.
thenBlock = elseBlock;
elseBlock = nullptr;
thenBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasFalseExpr)
{
// false
// S0 -->-- C -->-- F -->-- S1
// | |
// +-->------------+
// bbj_cond(true)
//
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
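// The (possibly reversed) condition becomes the JTRUE that terminates condBlock.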
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1());
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(condBlock, jmpStmt);
// Remove the original qmark statement.
fgRemoveStmt(block, stmt);
// Since this is a top level qmark, either it has a dst, in which case we assign the
// "then" and "else" expressions into that dst below, or it is void and there is
// nothing to assign.
unsigned lclNum = BAD_VAR_NUM;
if (dst != nullptr)
{
assert(dst->gtOper == GT_LCL_VAR);
lclNum = dst->AsLclVar()->GetLclNum();
}
else
{
assert(qmark->TypeGet() == TYP_VOID);
}
if (hasTrueExpr)
{
if (dst != nullptr)
{
trueExpr = gtNewTempAssign(lclNum, trueExpr);
}
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(thenBlock, trueStmt);
}
// Assign the falseExpr into the dst or tmp, insert in elseBlock
if (hasFalseExpr)
{
if (dst != nullptr)
{
falseExpr = gtNewTempAssign(lclNum, falseExpr);
}
Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(elseBlock, falseStmt);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand GT_QMARK nodes from the flow graph into basic blocks.
*
*/
void Compiler::fgExpandQmarkNodes()
{
if (compQmarkUsed)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
#ifdef DEBUG
fgPreExpandQmarkChecks(expr);
#endif
fgExpandQmarkStmt(block, stmt);
}
}
#ifdef DEBUG
fgPostExpandQmarkChecks();
#endif
}
compQmarkRationalized = true;
}
#ifdef DEBUG
/*****************************************************************************
*
* Make sure we don't have any more GT_QMARK nodes.
*
*/
void Compiler::fgPostExpandQmarkChecks()
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
}
}
#endif
/*****************************************************************************
*
* Promoting struct locals
*/
void Compiler::fgPromoteStructs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In fgPromoteStructs()\n");
}
#endif // DEBUG
if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE))
{
JITDUMP(" promotion opt flag not enabled\n");
return;
}
if (fgNoStructPromotion)
{
JITDUMP(" promotion disabled by JitNoStructPromotion\n");
return;
}
#if 0
// The code in this #if has been useful in debugging struct promotion issues, by
// enabling selective enablement of the struct promotion optimization according to
// method hash.
#ifdef DEBUG
unsigned methHash = info.compMethodHash();
char* lostr = getenv("structpromohashlo");
unsigned methHashLo = 0;
if (lostr != NULL)
{
sscanf_s(lostr, "%x", &methHashLo);
}
char* histr = getenv("structpromohashhi");
unsigned methHashHi = UINT32_MAX;
if (histr != NULL)
{
sscanf_s(histr, "%x", &methHashHi);
}
if (methHash < methHashLo || methHash > methHashHi)
{
return;
}
else
{
printf("Promoting structs for method %s, hash = 0x%x.\n",
info.compFullName, info.compMethodHash());
printf(""); // in our logic this causes a flush
}
#endif // DEBUG
#endif // 0
if (info.compIsVarArgs)
{
JITDUMP(" promotion disabled because of varargs\n");
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable before fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
// The lvaTable might grow as we grab temps. Make a local copy here.
unsigned startLvaCount = lvaCount;
//
// Loop through the original lvaTable, looking for struct locals to be promoted.
//
lvaStructPromotionInfo structPromotionInfo;
bool tooManyLocalsReported = false;
// Clear the structPromotionHelper, since it is used during inlining, at which point it
// may be conservative about looking up SIMD info.
// We don't want to preserve those conservative decisions for the actual struct promotion.
structPromotionHelper->Clear();
for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++)
{
// Whether this var got promoted
bool promotedVar = false;
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote
// its fields. Instead, we will attempt to enregister the entire struct.
if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc)))
{
varDsc->lvRegStruct = true;
}
// Don't promote if we have reached the tracking limit.
else if (lvaHaveManyLocals())
{
// Print the message first time when we detected this condition
if (!tooManyLocalsReported)
{
JITDUMP("Stopped promoting struct fields, due to too many locals.\n");
}
tooManyLocalsReported = true;
}
else if (varTypeIsStruct(varDsc))
{
assert(structPromotionHelper != nullptr);
promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum);
}
if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed)
{
// Even if we have not used this in a SIMD intrinsic, if it is not being promoted,
// we will treat it as a reg struct.
varDsc->lvRegStruct = true;
}
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable after fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
}
void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_FIELD);
GenTreeField* field = tree->AsField();
GenTree* objRef = field->GetFldObj();
GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr;
noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)));
/* Is this an instance data member? */
if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))
{
unsigned lclNum = obj->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(obj))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = field->gtFldOffset;
unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
if (fieldLclIndex == BAD_VAR_NUM)
{
// Access a promoted struct's field with an offset that doesn't correspond to any field.
// It can happen if the struct was cast to another struct with different offsets.
return;
}
const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex);
var_types fieldType = fieldDsc->TypeGet();
assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type.
if (tree->TypeGet() != fieldType)
{
if (tree->TypeGet() != TYP_STRUCT)
{
// This is going to be an incorrect instruction promotion.
// For example when we try to read int as long.
return;
}
if (field->gtFldHnd != fieldDsc->lvFieldHnd)
{
CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr;
CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass);
CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass);
if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass)
{
// Access the promoted field with a different class handle, can't check that types match.
return;
}
// Access the promoted field as a field of a non-promoted struct with the same class handle.
}
else
{
// As we already checked this above, we must have a tree with a TYP_STRUCT type
//
assert(tree->TypeGet() == TYP_STRUCT);
// The field tree accesses it as a struct, but the promoted LCL_VAR field
// says that it has another type. This happens when struct promotion unwraps
// a single field struct to get to its ultimate type.
//
// Note that currently, we cannot have a promoted LCL_VAR field with a struct type.
//
// This mismatch in types can lead to problems for some parent node type like GT_RETURN.
// So we check the parent node and only allow this optimization when we have
// a GT_ADDR or a GT_ASG.
//
// Note that for a GT_ASG we have to do some additional work,
// see below after the SetOper(GT_LCL_VAR)
//
if (!parent->OperIs(GT_ADDR, GT_ASG))
{
// Don't transform other operations such as GT_RETURN
//
return;
}
#ifdef DEBUG
// This is an additional DEBUG-only sanity check
//
assert(structPromotionHelper != nullptr);
structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType);
#endif // DEBUG
}
}
tree->SetOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(fieldLclIndex);
tree->gtType = fieldType;
tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`.
if (parent->gtOper == GT_ASG)
{
// If we are changing the left side of an assignment, we need to set
// these two flags:
//
if (parent->AsOp()->gtOp1 == tree)
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
// Promotion of struct containing struct fields where the field
// is a struct with a single pointer sized scalar type field: in
// this case struct promotion uses the type of the underlying
// scalar field as the type of struct field instead of recursively
// promoting. This can lead to a case where we have a block-asgn
// with its RHS replaced with a scalar type. Mark RHS value as
// DONT_CSE so that assertion prop will not do const propagation.
// The reason this is required is that if RHS of a block-asg is a
// constant, then it is interpreted as init-block incorrectly.
//
// TODO - This can also be avoided if we implement recursive struct
// promotion, tracked by #10019.
if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree))
{
tree->gtFlags |= GTF_DONT_CSE;
}
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex);
}
#endif // DEBUG
}
}
else
{
// Normed struct
// A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if
// the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8
// bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However,
// there is one extremely rare case where that won't be true. An enum type is a special value type
// that contains exactly one element of a primitive integer type (that, for CLS programs is named
// "value__"). The VM tells us that a local var of that enum type is the primitive type of the
// enum's single field. It turns out that it is legal for IL to access this field using ldflda or
// ldfld. For example:
//
// .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum
// {
// .field public specialname rtspecialname int16 value__
// .field public static literal valuetype mynamespace.e_t one = int16(0x0000)
// }
// .method public hidebysig static void Main() cil managed
// {
// .locals init (valuetype mynamespace.e_t V_0)
// ...
// ldloca.s V_0
// ldflda int16 mynamespace.e_t::value__
// ...
// }
//
// Normally, compilers will not generate the ldflda, since it is superfluous.
//
// In the example, the lclVar is short, but the JIT promotes all trees using this local to the
// "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type
// mismatch like this, don't do this morphing. The local var may end up getting marked as
// address taken, and the appropriate SHORT load will be done from memory in that case.
if (tree->TypeGet() == obj->TypeGet())
{
tree->ChangeOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree->gtFlags &= GTF_NODE_MASK;
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in normed struct with local var V%02u\n", lclNum);
}
#endif // DEBUG
}
}
}
}
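//------------------------------------------------------------------------
// fgMorphLocalField: Replace a GT_LCL_FLD of a promoted struct local with a
//    GT_LCL_VAR of the matching promoted field local when one covers the
//    access, or a full-width GT_LCL_FLD of a SIMD local with a GT_LCL_VAR of
//    that local. If a promoted struct has no suitable field local, the struct
//    is marked do-not-enregister so that it stays in memory.
//
// Arguments:
//    tree   - the GT_LCL_FLD node
//    parent - the parent of 'tree', consulted to set assignment flags
//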
void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_LCL_FLD);
unsigned lclNum = tree->AsLclFld()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(varDsc))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = tree->AsLclFld()->GetLclOffs();
unsigned fieldLclIndex = 0;
LclVarDsc* fldVarDsc = nullptr;
if (fldOffset != BAD_VAR_NUM)
{
fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
noway_assert(fieldLclIndex != BAD_VAR_NUM);
fldVarDsc = lvaGetDesc(fieldLclIndex);
}
var_types treeType = tree->TypeGet();
var_types fieldType = fldVarDsc->TypeGet();
if (fldOffset != BAD_VAR_NUM &&
((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1)))
{
// There is an existing sub-field we can use.
tree->AsLclFld()->SetLclNum(fieldLclIndex);
// The field must be an enregisterable type; otherwise it would not be a promoted field.
// The tree type may not match, e.g. for return types that have been morphed, but both
// must be enregisterable types.
assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType));
tree->ChangeOper(GT_LCL_VAR);
assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex);
tree->gtType = fldVarDsc->TypeGet();
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex);
}
else
{
// There is no existing field that has all the parts that we need
// So we must ensure that the struct lives in memory.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
#ifdef DEBUG
// We can't convert this guy to a float because he really does have his
// address taken..
varDsc->lvKeepType = 1;
#endif // DEBUG
}
}
else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc)))
{
assert(tree->AsLclFld()->GetLclOffs() == 0);
tree->gtType = varDsc->TypeGet();
tree->ChangeOper(GT_LCL_VAR);
JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum);
}
}
}
//------------------------------------------------------------------------
// fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs
void Compiler::fgResetImplicitByRefRefCount()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgResetImplicitByRefRefCount()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsImplicitByRef)
{
// Clear the ref count field; fgMarkAddressTakenLocals will increment it per
// appearance of implicit-by-ref param so that call arg morphing can do an
// optimization for single-use implicit-by-ref params whose single use is as
// an outgoing call argument.
varDsc->setLvRefCnt(0, RCS_EARLY);
varDsc->setLvRefCntWtd(0, RCS_EARLY);
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
//------------------------------------------------------------------------
// fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from
// struct to pointer). Also choose (based on address-exposed analysis)
// which struct promotions of implicit byrefs to keep or discard.
// For those which are kept, insert the appropriate initialization code.
// For those which are to be discarded, annotate the promoted field locals
// so that fgMorphImplicitByRefArgs will know to rewrite their appearances
// using indirections off the pointer parameters.
void Compiler::fgRetypeImplicitByRefArgs()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgRetypeImplicitByRefArgs()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
unsigned size;
if (varDsc->lvSize() > REGSIZE_BYTES)
{
size = varDsc->lvSize();
}
else
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
size = info.compCompHnd->getClassSize(typeHnd);
}
if (varDsc->lvPromoted)
{
// This implicit-by-ref was promoted; create a new temp to represent the
// promoted struct before rewriting this parameter as a pointer.
unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref"));
lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true);
if (info.compIsVarArgs)
{
lvaSetStructUsedAsVarArg(newLclNum);
}
// Update varDsc since lvaGrabTemp might have re-allocated the var dsc array.
varDsc = lvaGetDesc(lclNum);
// Copy the struct promotion annotations to the new temp.
LclVarDsc* newVarDsc = lvaGetDesc(newLclNum);
newVarDsc->lvPromoted = true;
newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart;
newVarDsc->lvFieldCnt = varDsc->lvFieldCnt;
newVarDsc->lvContainsHoles = varDsc->lvContainsHoles;
newVarDsc->lvCustomLayout = varDsc->lvCustomLayout;
#ifdef DEBUG
newVarDsc->lvKeepType = true;
#endif // DEBUG
// Propagate address-taken-ness and do-not-enregister-ness.
newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason()));
newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister;
newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
newVarDsc->lvSingleDef = varDsc->lvSingleDef;
newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate;
newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef;
#ifdef DEBUG
newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason());
#endif // DEBUG
// If the promotion is dependent, the promoted temp would just be committed
// to memory anyway, so we'll rewrite its appearances to be indirections
// through the pointer parameter, the same as we'd do for this
// parameter if it weren't promoted at all (otherwise the initialization
// of the new temp would just be a needless memcpy at method entry).
//
// Otherwise, see how many appearances there are. We keep two early ref counts: total
// number of references to the struct or some field, and how many of these are
// arguments to calls. We undo promotion unless we see enough non-call uses.
//
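// For example (a sketch): a struct with 4 promoted fields that appears 6
// times, 4 of them as call arguments, has nonCallAppearances == 2, which is
// <= lvFieldCnt (4), so its promotion is undone.
//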
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
const unsigned nonCallAppearances = totalAppearances - callAppearances;
bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ||
(nonCallAppearances <= varDsc->lvFieldCnt));
#ifdef DEBUG
// Above is a profitability heuristic; either value of
// undoPromotion should lead to correct code. So,
// under stress, make different decisions at times.
if (compStressCompile(STRESS_BYREF_PROMOTION, 25))
{
undoPromotion = !undoPromotion;
JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum,
undoPromotion ? "" : "NOT");
}
#endif // DEBUG
JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n",
undoPromotion ? "Undoing" : "Keeping", lclNum,
(lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "",
totalAppearances, nonCallAppearances, varDsc->lvFieldCnt);
if (!undoPromotion)
{
// Insert IR that initializes the temp from the parameter.
// LHS is a simple reference to the temp.
fgEnsureFirstBBisScratch();
GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType);
// RHS is an indirection (using GT_OBJ) off the parameter.
GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF);
GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size));
GenTree* assign = gtNewAssignNode(lhs, rhs);
fgNewStmtAtBeg(fgFirstBB, assign);
}
// Update the locals corresponding to the promoted fields.
unsigned fieldLclStart = varDsc->lvFieldLclStart;
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
if (undoPromotion)
{
// Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs
// will know to rewrite appearances of this local.
assert(fieldVarDsc->lvParentLcl == lclNum);
}
else
{
// Set the new parent.
fieldVarDsc->lvParentLcl = newLclNum;
}
fieldVarDsc->lvIsParam = false;
// The fields shouldn't inherit any register preferences from
// the parameter which is really a pointer to the struct.
fieldVarDsc->lvIsRegArg = false;
fieldVarDsc->lvIsMultiRegArg = false;
fieldVarDsc->SetArgReg(REG_NA);
#if FEATURE_MULTIREG_ARGS
fieldVarDsc->SetOtherArgReg(REG_NA);
#endif
}
// Hijack lvFieldLclStart to record the new temp number.
// It will get fixed up in fgMarkDemotedImplicitByRefArgs.
varDsc->lvFieldLclStart = newLclNum;
// Go ahead and clear lvFieldCnt -- either we're promoting
// a replacement temp or we're not promoting this arg, and
// in either case the parameter is now a pointer that doesn't
// have these fields.
varDsc->lvFieldCnt = 0;
// Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs
// whether references to the struct should be rewritten as
// indirections off the pointer (not promoted) or references
// to the new struct local (promoted).
varDsc->lvPromoted = !undoPromotion;
}
else
{
// The "undo promotion" path above clears lvPromoted for args that struct
// promotion wanted to promote but that aren't considered profitable to
// rewrite. It hijacks lvFieldLclStart to communicate to
// fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left
// on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't
// have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs
// and fgMarkDemotedImplicitByRefArgs.
assert(varDsc->lvFieldLclStart == 0);
}
// Since the parameter in this position is really a pointer, its type is TYP_BYREF.
varDsc->lvType = TYP_BYREF;
// Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF
// make sure that the following flag is not set as these will force SSA to
// exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa)
//
varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it.
// The struct parameter may have had its address taken, but the pointer parameter
// cannot -- any uses of the struct parameter's address are uses of the pointer
// parameter's value, and there's no way for the MSIL to reference the pointer
// parameter's address. So clear the address-taken bit for the parameter.
varDsc->CleanAddressExposed();
varDsc->lvDoNotEnregister = 0;
#ifdef DEBUG
// This should not be converted to a double in stress mode,
// because it is really a pointer
varDsc->lvKeepType = 1;
if (verbose)
{
printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum);
}
#endif // DEBUG
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
//------------------------------------------------------------------------
// fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion
// asked to promote. Appearances of these have now been rewritten
// (by fgMorphImplicitByRefArgs) using indirections from the pointer
// parameter or references to the promotion temp, as appropriate.
void Compiler::fgMarkDemotedImplicitByRefArgs()
{
JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n");
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64)
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
JITDUMP("Clearing annotation for V%02d\n", lclNum);
if (varDsc->lvPromoted)
{
// The parameter is simply a pointer now, so clear lvPromoted. It was left set
// by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that
// appearances of this arg needed to be rewritten to a new promoted struct local.
varDsc->lvPromoted = false;
// Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs
// to tell fgMorphImplicitByRefArgs which local is the new promoted struct one.
varDsc->lvFieldLclStart = 0;
}
else if (varDsc->lvFieldLclStart != 0)
{
// We created new temps to represent a promoted struct corresponding to this
// parameter, but decided not to go through with the promotion and have
// rewritten all uses as indirections off the pointer parameter.
// We stashed the pointer to the new struct temp in lvFieldLclStart; make
// note of that and clear the annotation.
unsigned structLclNum = varDsc->lvFieldLclStart;
varDsc->lvFieldLclStart = 0;
// The temp struct is now unused; set flags appropriately so that we
// won't allocate space for it on the stack.
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum);
structVarDsc->CleanAddressExposed();
#ifdef DEBUG
structVarDsc->lvUnusedStruct = true;
structVarDsc->lvUndoneStructPromotion = true;
#endif // DEBUG
unsigned fieldLclStart = structVarDsc->lvFieldLclStart;
unsigned fieldCount = structVarDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum);
// Fix the pointer to the parent local.
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
assert(fieldVarDsc->lvParentLcl == lclNum);
fieldVarDsc->lvParentLcl = structLclNum;
// The field local is now unused; set flags appropriately so that
// we won't allocate stack space for it.
fieldVarDsc->CleanAddressExposed();
}
}
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
/*****************************************************************************
*
* Morph irregular parameters
* for x64 and ARM64 this means turning them into byrefs, adding extra indirs.
*/
bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
{
#if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64)
return false;
#else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
bool changed = false;
// Implicit byref morphing needs to know if the reference to the parameter is a
// child of GT_ADDR or not, so this method looks one level down and does the
// rewrite whenever a child is a reference to an implicit byref parameter.
if (tree->gtOper == GT_ADDR)
{
if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true);
changed = (morphedTree != nullptr);
assert(!changed || (morphedTree == tree));
}
}
else
{
for (GenTree** pTree : tree->UseEdges())
{
GenTree** pTreeCopy = pTree;
GenTree* childTree = *pTree;
if (childTree->gtOper == GT_LCL_VAR)
{
GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false);
if (newChildTree != nullptr)
{
changed = true;
*pTreeCopy = newChildTree;
}
}
}
}
return changed;
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64
}
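//------------------------------------------------------------------------
// fgMorphImplicitByRefArgs: Morph a single reference to an implicit-by-ref
//    parameter, or to a dependently promoted field of one, into the
//    appropriate form: a reference to the replacement promoted struct local,
//    an OBJ/FIELD indirection off the (now TYP_BYREF) parameter, or the
//    pointer value itself when the reference appears under a GT_ADDR.
//
// Arguments:
//    tree   - the GT_LCL_VAR, or the GT_ADDR whose operand is a GT_LCL_VAR
//    isAddr - true if 'tree' is the GT_ADDR node
//
// Return Value:
//    The updated tree if a transformation was done, nullptr otherwise.
//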
GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr)
{
assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)));
assert(isAddr == (tree->gtOper == GT_ADDR));
GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree;
unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum();
LclVarDsc* lclVarDsc = lvaGetDesc(lclNum);
CORINFO_FIELD_HANDLE fieldHnd;
unsigned fieldOffset = 0;
var_types fieldRefType = TYP_UNKNOWN;
if (lvaIsImplicitByRefLocal(lclNum))
{
// The SIMD transformation to coalesce contiguous references to SIMD vector fields will
// re-invoke the traversal to mark address-taken locals.
// So, we may encounter a tree that has already been transformed to TYP_BYREF.
// If we do, leave it as-is.
if (!varTypeIsStruct(lclVarTree))
{
assert(lclVarTree->TypeGet() == TYP_BYREF);
return nullptr;
}
else if (lclVarDsc->lvPromoted)
{
// fgRetypeImplicitByRefArgs created a new promoted struct local to represent this
// arg. Rewrite this to refer to the new local.
assert(lclVarDsc->lvFieldLclStart != 0);
lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart);
return tree;
}
fieldHnd = nullptr;
}
else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl))
{
// This was a field reference to an implicit-by-reference struct parameter that was
// dependently promoted; update it to a field reference off the pointer.
// Grab the field handle from the struct field lclVar.
fieldHnd = lclVarDsc->lvFieldHnd;
fieldOffset = lclVarDsc->lvFldOffset;
assert(fieldHnd != nullptr);
// Update lclNum/lclVarDsc to refer to the parameter
lclNum = lclVarDsc->lvParentLcl;
lclVarDsc = lvaGetDesc(lclNum);
fieldRefType = lclVarTree->TypeGet();
}
else
{
// We only need to transform the 'marked' implicit by-ref parameters
return nullptr;
}
// This is no longer a def of the lclVar, even if it WAS a def of the struct.
lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK);
if (isAddr)
{
if (fieldHnd == nullptr)
{
// change &X into just plain X
tree->ReplaceWith(lclVarTree, this);
tree->gtType = TYP_BYREF;
}
else
{
// change &(X.f) [i.e. GT_ADDR of local for promoted arg field]
// into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param]
lclVarTree->AsLclVarCommon()->SetLclNum(lclNum);
lclVarTree->gtType = TYP_BYREF;
tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset);
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing address of implicit by ref struct parameter with byref:\n");
}
#endif // DEBUG
}
else
{
// Change X into OBJ(X) or FIELD(X, f)
var_types structType = tree->gtType;
tree->gtType = TYP_BYREF;
if (fieldHnd)
{
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset);
}
else
{
tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree);
if (structType == TYP_STRUCT)
{
gtSetObjGcInfo(tree->AsObj());
}
}
// TODO-CQ: If the VM ever stops violating the ABI and passing heap references
// we could remove TGTANYWHERE
tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE);
#ifdef DEBUG
if (verbose)
{
printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n");
}
#endif // DEBUG
}
#ifdef DEBUG
if (verbose)
{
gtDispTree(tree);
}
#endif // DEBUG
return tree;
}
//------------------------------------------------------------------------
// fgAddFieldSeqForZeroOffset:
// Associate a fieldSeq (with a zero offset) with the GenTree node 'addr'
//
// Arguments:
// addr - A GenTree node
// fieldSeqZero - a fieldSeq (with a zero offset)
//
// Notes:
// Some GenTree nodes have internal fields that record the field sequence.
// If we have one of these nodes (GT_CNS_INT, GT_LCL_FLD)
// we can append the field sequence using their gtFieldSeq fields.
// If we have a GT_ADD with a GT_CNS_INT operand we can use the
// fieldSeq from that child node.
// Otherwise we record 'fieldSeqZero' for the address node in the
// GetZeroOffsetFieldMap() map.
// When doing so we take care to preserve any existing zero field sequence
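// For example (a sketch): ADDR(LCL_FLD) appends to the LCL_FLD's field
// sequence, ADD(_, CNS_INT) appends to the constant's field sequence, and
// a plain local-variable address falls through to the zero-offset map.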
//
void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero)
{
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Tunnel through any commas.
const bool commaOnly = true;
addr = addr->gtEffectiveVal(commaOnly);
// We still expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
FieldSeqNode* fieldSeqUpdate = fieldSeqZero;
GenTree* fieldSeqNode = addr;
bool fieldSeqRecorded = false;
#ifdef DEBUG
if (verbose)
{
printf("\nfgAddFieldSeqForZeroOffset for");
gtDispAnyFieldSeq(fieldSeqZero);
printf("\naddr (Before)\n");
gtDispNode(addr, nullptr, nullptr, false);
gtDispCommonEndLine(addr);
}
#endif // DEBUG
switch (addr->OperGet())
{
case GT_CNS_INT:
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
break;
case GT_ADDR:
if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD)
{
fieldSeqNode = addr->AsOp()->gtOp1;
GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld();
fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero);
lclFld->SetFieldSeq(fieldSeqUpdate);
fieldSeqRecorded = true;
}
break;
case GT_ADD:
if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp1;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp2;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
break;
default:
break;
}
if (fieldSeqRecorded == false)
{
// Record in the general zero-offset map.
// The "addr" node might already be annotated with a zero-offset field sequence.
FieldSeqNode* existingFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq))
{
// Append the zero field sequences
fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero);
}
// Overwrite the field sequence annotation for 'addr'
GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite);
fieldSeqRecorded = true;
}
#ifdef DEBUG
if (verbose)
{
printf(" (After)\n");
gtDispNode(fieldSeqNode, nullptr, nullptr, false);
gtDispCommonEndLine(fieldSeqNode);
}
#endif // DEBUG
}
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
// If the RHS of the input stmt is a read of SIMD vector field X, then this function
// will keep reading the next few stmts based on the vector size (2, 3, 4).
// If the successive LHS locations are contiguous and the RHS locations are also
// contiguous, then we replace those statements with a single copyblk.
//
// Argument:
// block - BasicBlock*. block which stmt belongs to
// stmt - Statement*. the stmt node we want to check
//
// return value:
// if this function successfully optimized the stmts, then return true. Otherwise
// return false;
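//
// For example (a sketch), for a Vector4 copy written as four float assignments:
//     dst.x = src.x; dst.y = src.y; dst.z = src.z; dst.w = src.w;
// where the destinations and the sources are each contiguous, the four
// statements are replaced with a single 16-byte block copy.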
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
GenTree* originalLHS = tree->AsOp()->gtOp1;
GenTree* prevLHS = tree->AsOp()->gtOp1;
GenTree* prevRHS = tree->AsOp()->gtOp2;
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
GenTree* exp = curStmt->GetRootNode();
if (exp->OperGet() != GT_ASG)
{
break;
}
GenTree* curLHS = exp->gtGetOp1();
GenTree* curRHS = exp->gtGetOp2();
if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
remainingAssignments--;
prevLHS = curLHS;
prevRHS = curRHS;
lastStmt = curStmt;
curStmt = curStmt->GetNextStmt();
}
if (remainingAssignments > 0)
{
// If the number of remaining assignments is greater than zero, then the
// assignments are not writing to contiguous memory locations from the
// same vector.
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\nFound contiguous assignments from a SIMD vector to memory.\n");
printf("From " FMT_BB ", stmt ", block->bbNum);
printStmtID(stmt);
printf(" to stmt");
printStmtID(lastStmt);
printf("\n");
}
#endif
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->GetNextStmt());
}
GenTree* dstNode;
if (originalLHS->OperIs(GT_LCL_FLD))
{
dstNode = originalLHS;
dstNode->gtType = simdType;
dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField());
// This may have changed a partial local field into full local field
if (dstNode->IsPartialLclFld(this))
{
dstNode->gtFlags |= GTF_VAR_USEASG;
}
else
{
dstNode->gtFlags &= ~GTF_VAR_USEASG;
}
}
else
{
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
if (simdStructNode->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(simdStructNode);
}
GenTree* copyBlkAddr = copyBlkDst;
if (copyBlkAddr->gtOper == GT_LEA)
{
copyBlkAddr = copyBlkAddr->AsAddrMode()->Base();
}
GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr();
if (localDst != nullptr)
{
setLclRelatedToSIMDIntrinsic(localDst);
}
if (simdStructNode->TypeGet() == TYP_BYREF)
{
assert(simdStructNode->OperIsLocal());
assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum()));
simdStructNode = gtNewIndir(simdType, simdStructNode);
}
else
{
assert(varTypeIsSIMD(simdStructNode));
}
dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
}
#ifdef DEBUG
if (verbose)
{
printf("\n" FMT_BB " stmt ", block->bbNum);
printStmtID(stmt);
printf("(before)\n");
gtDispStmt(stmt);
}
#endif
assert(!simdStructNode->CanCSE());
simdStructNode->ClearDoNotCSE();
tree = gtNewAssignNode(dstNode, simdStructNode);
stmt->SetRootNode(tree);
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all
// local field access into LCL_FLDs, at that point we would be
// combining 2 existing LCL_FLDs or 2 FIELDs that do not reference
// a local and thus cannot result in a new address exposed local.
fgMarkAddressExposedLocals(stmt);
#ifdef DEBUG
if (verbose)
{
printf("\nReplaced " FMT_BB " stmt", block->bbNum);
printStmtID(stmt);
printf("(after)\n");
gtDispStmt(stmt);
}
#endif
return true;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// fgCheckStmtAfterTailCall: check that statements after the tail call stmt
// candidate are in one of the expected forms described below.
//
// Return Value:
// 'true' if stmts are in the expected form, else 'false'.
//
bool Compiler::fgCheckStmtAfterTailCall()
{
// For void calls, we would have created a GT_CALL in the stmt list.
// For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
// For calls returning structs, we would have a void call, followed by a void return.
// For debuggable code, it would be an assignment of the call to a temp.
// We want to get rid of any of these extra trees, and just leave
// the call.
Statement* callStmt = fgMorphStmt;
Statement* nextMorphStmt = callStmt->GetNextStmt();
// Check that the remaining stmts in the block are in one of the following patterns:
// 1) ret(void)
// 2) ret(cast*(callResultLclVar))
// 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block
// 4) nop
if (nextMorphStmt != nullptr)
{
GenTree* callExpr = callStmt->GetRootNode();
if (callExpr->gtOper != GT_ASG)
{
// The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar),
// where lclVar was return buffer in the call for structs or simd.
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = retStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
nextMorphStmt = retStmt->GetNextStmt();
}
else
{
noway_assert(callExpr->gtGetOp1()->OperIsLocal());
unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// We can have a chain of assignments from the call result to
// various inline return spill temps. These are ok as long
// as the last one ultimately provides the return value or is ignored.
//
// And if we're returning a small type we may see a cast
// on the source side.
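//
// For example (a sketch), a chain like
//     ASG(tmp1, callResultLclVar); ASG(tmp2, tmp1); RETURN(tmp2)
// is accepted, with callResultLclNumber updated to track the last temp.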
while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP)))
{
if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP))
{
nextMorphStmt = nextMorphStmt->GetNextStmt();
continue;
}
Statement* moveStmt = nextMorphStmt;
GenTree* moveExpr = nextMorphStmt->GetRootNode();
GenTree* moveDest = moveExpr->gtGetOp1();
noway_assert(moveDest->OperIsLocal());
// Tunnel through any casts on the source side.
GenTree* moveSource = moveExpr->gtGetOp2();
while (moveSource->OperIs(GT_CAST))
{
noway_assert(!moveSource->gtOverflow());
moveSource = moveSource->gtGetOp1();
}
noway_assert(moveSource->OperIsLocal());
// Verify we're just passing the value from one local to another
// along the chain.
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum();
noway_assert(srcLclNum == callResultLclNumber);
const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum();
callResultLclNumber = dstLclNum;
nextMorphStmt = moveStmt->GetNextStmt();
}
if (nextMorphStmt != nullptr)
#endif
{
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = nextMorphStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
GenTree* treeWithLcl = retExpr->gtGetOp1();
while (treeWithLcl->gtOper == GT_CAST)
{
noway_assert(!treeWithLcl->gtOverflow());
treeWithLcl = treeWithLcl->gtGetOp1();
}
noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum());
nextMorphStmt = retStmt->GetNextStmt();
}
}
}
return nextMorphStmt == nullptr;
}
//------------------------------------------------------------------------
// fgCanTailCallViaJitHelper: check whether we can use the faster tailcall
// JIT helper on x86.
//
// Return Value:
// 'true' if we can; or 'false' if we should use the generic tailcall mechanism.
//
bool Compiler::fgCanTailCallViaJitHelper()
{
#if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN)
// On anything except windows X86 we have no faster mechanism available.
return false;
#else
// The JIT helper does not properly handle the case where localloc was used.
if (compLocallocUsed)
return false;
return true;
#endif
}
//------------------------------------------------------------------------
// fgMorphReduceAddOps: reduce successive variable adds into a single multiply,
// e.g., i + i + i + i => i * 4.
//
// Arguments:
// tree - tree for reduction
//
// Return Value:
// reduced tree if pattern matches, original tree otherwise
//
GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree)
{
// ADD(_, V0) starts the pattern match.
if (!tree->OperIs(GT_ADD) || tree->gtOverflow())
{
return tree;
}
#ifndef TARGET_64BIT
// Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing
// ADD ops with a helper function call. Don't apply optimization in that case.
if (tree->TypeGet() == TYP_LONG)
{
return tree;
}
#endif
GenTree* lclVarTree = tree->AsOp()->gtOp2;
GenTree* consTree = tree->AsOp()->gtOp1;
GenTree* op1 = consTree;
GenTree* op2 = lclVarTree;
if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2))
{
return tree;
}
int foldCount = 0;
unsigned lclNum = op2->AsLclVarCommon()->GetLclNum();
// Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum).
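// A sketch of the walk for "i + i + i + i":
//     ADD(ADD(ADD(LCL_VAR i, LCL_VAR i), LCL_VAR i), LCL_VAR i)
// Each ADD with an ADD on its left contributes one to foldCount; the
// innermost ADD(lclNum, lclNum) contributes two, giving foldCount == 4.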
while (true)
{
// ADD(lclNum, lclNum), end of tree
if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount += 2;
break;
}
// ADD(ADD(X, Y), lclNum), keep descending
else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount++;
op2 = op1->AsOp()->gtOp2;
op1 = op1->AsOp()->gtOp1;
}
// Any other case is a pattern we won't attempt to fold for now.
else
{
return tree;
}
}
// V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize
// accordingly
consTree->BashToConst(foldCount, tree->TypeGet());
GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree);
DEBUG_DESTROY_NODE(tree);
return morphed;
}
| 1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/native/corehost/ijwhost/ijwthunk.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "ijwhost.h"
#include "bootstrap_thunk_chunk.h"
#include "error_codes.h"
#include "trace.h"
#include "utils.h"
#include "corhdr.h"
#include <heapapi.h>
#include <new>
#include <mutex>
#ifdef _WIN64
#define COR_VTABLE_PTRSIZED COR_VTABLE_64BIT
#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_32BIT
#else
#define COR_VTABLE_PTRSIZED COR_VTABLE_32BIT
#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_64BIT
#endif
namespace
{
std::mutex g_thunkChunkLock{};
bootstrap_thunk_chunk* g_pVtableBootstrapThunkChunkList;
// We swallow the trace messages so we don't output to a stderr of a process that we do not own unless tracing is enabled.
void __cdecl swallow_trace(const pal::char_t* msg)
{
(void)msg;
}
}
HANDLE g_heapHandle;
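// Walk the image's vtfixup table and point each pointer-sized slot at a
// bootstrap thunk; the thunk records the original metadata token and the
// module handle, and starts the runtime on first call (see
// start_runtime_and_get_target_address below).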
bool patch_vtable_entries(PEDecoder& pe)
{
size_t numFixupRecords;
IMAGE_COR_VTABLEFIXUP* pFixupTable = pe.GetVTableFixups(&numFixupRecords);
if (numFixupRecords == 0)
{
// If we have no fixups, no need to allocate thunks.
return true;
}
size_t numThunks = 0;
for (size_t i = 0; i < numFixupRecords; ++i)
{
numThunks += pFixupTable[i].Count;
}
size_t chunkSize = sizeof(bootstrap_thunk_chunk) + sizeof(bootstrap_thunk) * numThunks;
void* pbChunk = HeapAlloc(g_heapHandle, 0, chunkSize);
if (pbChunk == nullptr)
{
return false;
}
bootstrap_thunk_chunk* chunk = new (pbChunk) bootstrap_thunk_chunk(numThunks, (pal::dll_t)pe.GetBase());
{
std::lock_guard<std::mutex> lock(g_thunkChunkLock);
chunk->SetNext(g_pVtableBootstrapThunkChunkList);
g_pVtableBootstrapThunkChunkList = chunk;
}
trace::setup();
error_writer_scope_t writer_scope(swallow_trace);
size_t currentThunk = 0;
for (size_t i = 0; i < numFixupRecords; ++i)
{
if (pFixupTable[i].Type & COR_VTABLE_PTRSIZED)
{
const BYTE** pointers = (const BYTE**)pe.GetRvaData(pFixupTable[i].RVA);
#ifdef _WIN64
DWORD oldProtect;
if (!VirtualProtect(pointers, (sizeof(BYTE*) * pFixupTable[i].Count), PAGE_READWRITE, &oldProtect))
{
trace::error(_X("Failed to change the vtfixup table from RO to R/W failed.\n"));
return false;
}
#endif
for (std::uint16_t method = 0; method < pFixupTable[i].Count; method++)
{
mdToken tok = (mdToken)(std::uintptr_t) pointers[method];
bootstrap_thunk* pThunk = chunk->GetThunk(currentThunk++);
pThunk->initialize((std::uintptr_t)&start_runtime_thunk_stub,
(pal::dll_t)pe.GetBase(),
tok,
(std::uintptr_t *)&pointers[method]);
pointers[method] = (BYTE*)pThunk->get_entrypoint();
}
#ifdef _WIN64
DWORD _;
if (!VirtualProtect(pointers, (sizeof(BYTE*) * pFixupTable[i].Count), oldProtect, &_))
{
trace::warning(_X("Failed to change the vtfixup table from R/W back to RO failed.\n"));
}
#endif
}
}
return true;
}
extern "C" std::uintptr_t __stdcall start_runtime_and_get_target_address(std::uintptr_t cookie)
{
trace::setup();
error_writer_scope_t writer_scope(swallow_trace);
bootstrap_thunk *pThunk = bootstrap_thunk::get_thunk_from_cookie(cookie);
load_in_memory_assembly_fn loadInMemoryAssembly;
pal::dll_t moduleHandle = pThunk->get_dll_handle();
pal::hresult_t status = get_load_in_memory_assembly_delegate(moduleHandle, &loadInMemoryAssembly);
if (status != StatusCode::Success)
{
// If we ignore the failure to patch bootstrap thunks we will come to this same
// function again, causing an infinite loop of "Failed to start the .NET runtime" errors.
// As we were taken here via an entry point with arbitrary signature,
// there's no way of returning the error code so we just throw it.
trace::error(_X("Failed to start the .NET runtime. Error code: %#x"), status);
#pragma warning (push)
#pragma warning (disable: 4297)
throw status;
#pragma warning (pop)
}
pal::string_t app_path;
if (!pal::get_module_path(moduleHandle, &app_path))
{
#pragma warning (push)
#pragma warning (disable: 4297)
throw StatusCode::LibHostCurExeFindFailure;
#pragma warning (pop)
}
loadInMemoryAssembly(moduleHandle, app_path.c_str(), nullptr);
std::uintptr_t thunkAddress = *(pThunk->get_slot_address());
return thunkAddress;
}
void release_bootstrap_thunks(PEDecoder& pe)
{
std::lock_guard<std::mutex> lock(g_thunkChunkLock);
// Clean up the VTable thunks if they exist.
for (bootstrap_thunk_chunk **ppCurChunk = &g_pVtableBootstrapThunkChunkList;
*ppCurChunk != NULL;
ppCurChunk = (*ppCurChunk)->GetNextPtr())
{
if ((*ppCurChunk)->get_dll_handle() == (pal::dll_t) pe.GetBase())
{
bootstrap_thunk_chunk *pDel = *ppCurChunk;
*ppCurChunk = (*ppCurChunk)->GetNext();
HeapFree(g_heapHandle, 0, pDel);
break;
}
}
}
bool are_thunks_installed_for_module(pal::dll_t instance)
{
std::lock_guard<std::mutex> lock{g_thunkChunkLock};
bootstrap_thunk_chunk* currentChunk = g_pVtableBootstrapThunkChunkList;
while (currentChunk != nullptr)
{
if (currentChunk->get_dll_handle() == instance)
{
return true;
}
currentChunk = currentChunk->GetNext();
}
return false;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "ijwhost.h"
#include "bootstrap_thunk_chunk.h"
#include "error_codes.h"
#include "trace.h"
#include "utils.h"
#include "corhdr.h"
#include <heapapi.h>
#include <new>
#include <mutex>
#ifdef _WIN64
#define COR_VTABLE_PTRSIZED COR_VTABLE_64BIT
#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_32BIT
#else
#define COR_VTABLE_PTRSIZED COR_VTABLE_32BIT
#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_64BIT
#endif
namespace
{
std::mutex g_thunkChunkLock{};
bootstrap_thunk_chunk* g_pVtableBootstrapThunkChunkList;
// We swallow the trace messages so we don't output to a stderr of a process that we do not own unless tracing is enabled.
void __cdecl swallow_trace(const pal::char_t* msg)
{
(void)msg;
}
}
HANDLE g_heapHandle;
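// Walk the image's vtfixup table and point each pointer-sized slot at a
// bootstrap thunk; the thunk records the original metadata token and the
// module handle, and starts the runtime on first call (see
// start_runtime_and_get_target_address below).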
bool patch_vtable_entries(PEDecoder& pe)
{
size_t numFixupRecords;
IMAGE_COR_VTABLEFIXUP* pFixupTable = pe.GetVTableFixups(&numFixupRecords);
if (numFixupRecords == 0)
{
// If we have no fixups, no need to allocate thunks.
return true;
}
size_t numThunks = 0;
for (size_t i = 0; i < numFixupRecords; ++i)
{
numThunks += pFixupTable[i].Count;
}
size_t chunkSize = sizeof(bootstrap_thunk_chunk) + sizeof(bootstrap_thunk) * numThunks;
void* pbChunk = HeapAlloc(g_heapHandle, 0, chunkSize);
if (pbChunk == nullptr)
{
return false;
}
bootstrap_thunk_chunk* chunk = new (pbChunk) bootstrap_thunk_chunk(numThunks, (pal::dll_t)pe.GetBase());
{
std::lock_guard<std::mutex> lock(g_thunkChunkLock);
chunk->SetNext(g_pVtableBootstrapThunkChunkList);
g_pVtableBootstrapThunkChunkList = chunk;
}
trace::setup();
error_writer_scope_t writer_scope(swallow_trace);
size_t currentThunk = 0;
for (size_t i = 0; i < numFixupRecords; ++i)
{
if (pFixupTable[i].Type & COR_VTABLE_PTRSIZED)
{
const BYTE** pointers = (const BYTE**)pe.GetRvaData(pFixupTable[i].RVA);
#ifdef _WIN64
DWORD oldProtect;
if (!VirtualProtect(pointers, (sizeof(BYTE*) * pFixupTable[i].Count), PAGE_READWRITE, &oldProtect))
{
trace::error(_X("Failed to change the vtfixup table from RO to R/W failed.\n"));
return false;
}
#endif
for (std::uint16_t method = 0; method < pFixupTable[i].Count; method++)
{
mdToken tok = (mdToken)(std::uintptr_t) pointers[method];
bootstrap_thunk* pThunk = chunk->GetThunk(currentThunk++);
pThunk->initialize((std::uintptr_t)&start_runtime_thunk_stub,
(pal::dll_t)pe.GetBase(),
tok,
(std::uintptr_t *)&pointers[method]);
pointers[method] = (BYTE*)pThunk->get_entrypoint();
}
#ifdef _WIN64
DWORD _;
if (!VirtualProtect(pointers, (sizeof(BYTE*) * pFixupTable[i].Count), oldProtect, &_))
{
trace::warning(_X("Failed to change the vtfixup table from R/W back to RO failed.\n"));
}
#endif
}
}
return true;
}
extern "C" std::uintptr_t __stdcall start_runtime_and_get_target_address(std::uintptr_t cookie)
{
trace::setup();
error_writer_scope_t writer_scope(swallow_trace);
bootstrap_thunk *pThunk = bootstrap_thunk::get_thunk_from_cookie(cookie);
load_in_memory_assembly_fn loadInMemoryAssembly;
pal::dll_t moduleHandle = pThunk->get_dll_handle();
pal::hresult_t status = get_load_in_memory_assembly_delegate(moduleHandle, &loadInMemoryAssembly);
if (status != StatusCode::Success)
{
// If we ignore the failure to patch bootstrap thunks we will come to this same
// function again, causing an infinite loop of "Failed to start the .NET runtime" errors.
// As we were taken here via an entry point with arbitrary signature,
// there's no way of returning the error code so we just throw it.
trace::error(_X("Failed to start the .NET runtime. Error code: %#x"), status);
#pragma warning (push)
#pragma warning (disable: 4297)
throw status;
#pragma warning (pop)
}
pal::string_t app_path;
if (!pal::get_module_path(moduleHandle, &app_path))
{
#pragma warning (push)
#pragma warning (disable: 4297)
throw StatusCode::LibHostCurExeFindFailure;
#pragma warning (pop)
}
loadInMemoryAssembly(moduleHandle, app_path.c_str(), nullptr);
std::uintptr_t thunkAddress = *(pThunk->get_slot_address());
return thunkAddress;
}
void release_bootstrap_thunks(PEDecoder& pe)
{
std::lock_guard<std::mutex> lock(g_thunkChunkLock);
// Clean up the VTable thunks if they exist.
for (bootstrap_thunk_chunk **ppCurChunk = &g_pVtableBootstrapThunkChunkList;
*ppCurChunk != NULL;
ppCurChunk = (*ppCurChunk)->GetNextPtr())
{
if ((*ppCurChunk)->get_dll_handle() == (pal::dll_t) pe.GetBase())
{
bootstrap_thunk_chunk *pDel = *ppCurChunk;
*ppCurChunk = (*ppCurChunk)->GetNext();
HeapFree(g_heapHandle, 0, pDel);
break;
}
}
}
bool are_thunks_installed_for_module(pal::dll_t instance)
{
std::lock_guard<std::mutex> lock{g_thunkChunkLock};
bootstrap_thunk_chunk* currentChunk = g_pVtableBootstrapThunkChunkList;
while (currentChunk != nullptr)
{
if (currentChunk->get_dll_handle() == instance)
{
return true;
}
currentChunk = currentChunk->GetNext();
}
return false;
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/utilcode/winfix.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// WinWrap.cpp
//
//
// This file contains wrapper functions for Win32 API's that take strings.
//
// COM+ internally uses UNICODE as the internal state and string format. This
// file will undef the mapping macros so that one cannot mistakenly call a
// method that isn't going to work. Instead, you have to call the correct
// wrapper API.
//
//*****************************************************************************
#include "stdafx.h" // Precompiled header key.
#include "winwrap.h" // Header for macros and functions.
#include "utilcode.h"
#include "holder.h"
#include "pedecoder.h"
// ====== READ BEFORE ADDING CONTRACTS ==================================================
// The functions in this file propagate SetLastError codes to their callers.
// Contracts are not guaranteed to preserve these codes (and no, we're not taking
// the overhead hit to make them do so. Don't bother asking.)
//
// Most of the wrappers have a contract of the form:
//
// NOTHROW;
// INJECT_FAULT(xxx);
//
// For such functions, use the special purpose construct:
//
// WINWRAPPER_NO_CONTRACT(xxx);
//
// For everything else, use STATIC_CONTRACT.
//
#undef CONTRACT
#define CONTRACT $$$$$$$$READ_COMMENT_IN_WINFIX_CPP$$$$$$$$$$
#undef CONTRACTL
#define CONTRACTL $$$$$$$$READ_COMMENT_IN_WINFIX_CPP$$$$$$$$$$
#ifdef ENABLE_CONTRACTS_IMPL
static BOOL gWinWrapperContractRecursionBreak = FALSE;
class WinWrapperContract
{
public:
WinWrapperContract(const char *szFunction, const char *szFile, int lineNum)
{
CANNOT_HAVE_CONTRACT;
m_pClrDebugState = NULL;
if (gWinWrapperContractRecursionBreak)
{
return;
}
m_pClrDebugState = GetClrDebugState();
// Save old debug state
m_IncomingClrDebugState = *m_pClrDebugState;
m_pClrDebugState->ViolationMaskReset( ThrowsViolation );
if (m_pClrDebugState->IsFaultForbid() && !(m_pClrDebugState->ViolationMask() & (FaultViolation|FaultNotFatal|BadDebugState)))
{
gWinWrapperContractRecursionBreak = TRUE;
CONTRACT_ASSERT("INJECT_FAULT called in a FAULTFORBID region.",
Contract::FAULT_Forbid,
Contract::FAULT_Mask,
szFunction,
szFile,
lineNum
);
}
};
~WinWrapperContract()
{
CANNOT_HAVE_CONTRACT;
//!!!!!! THIS DESTRUCTOR MUST NOT CHANGE THE GETLASTERROR VALUE !!!!!!
// Backout all changes to debug state.
if (m_pClrDebugState != NULL)
{
*m_pClrDebugState = m_IncomingClrDebugState;
}
}
private:
ClrDebugState *m_pClrDebugState;
ClrDebugState m_IncomingClrDebugState;
};
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define WINWRAPPER_NO_CONTRACT(stmt) \
STATIC_CONTRACT_NOTHROW; \
STATIC_CONTRACT_FAULT; \
STATIC_CONTRACT_CANNOT_TAKE_LOCK; \
WinWrapperContract __wcontract(__FUNCTION__, __FILE__, __LINE__); \
if (0) {stmt} \
#define STATIC_WINWRAPPER_NO_CONTRACT(stmt) \
STATIC_CONTRACT_NOTHROW; \
STATIC_CONTRACT_CANNOT_TAKE_LOCK; \
STATIC_CONTRACT_FAULT; \
if (0) {stmt} \
#else
#define WINWRAPPER_NO_CONTRACT(stmt)
#define STATIC_WINWRAPPER_NO_CONTRACT(stmt)
#endif
ULONG g_dwMaxDBCSCharByteSize = 0;
// The only purpose of this function is to make a local copy of lpCommandLine.
// Because windows implementation of CreateProcessW can actually change lpCommandLine,
// but we'd like to keep it const.
BOOL
WszCreateProcess(
LPCWSTR lpApplicationName,
LPCWSTR lpCommandLine,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
LPVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation
)
{
WINWRAPPER_NO_CONTRACT(SetLastError(ERROR_OUTOFMEMORY); return 0;);
BOOL fResult;
DWORD err;
{
size_t commandLineLength = wcslen(lpCommandLine) + 1;
NewArrayHolder<WCHAR> nonConstCommandLine(new (nothrow) WCHAR[commandLineLength]);
if (nonConstCommandLine == NULL)
{
SetLastError(ERROR_OUTOFMEMORY);
return 0;
}
memcpy(nonConstCommandLine, lpCommandLine, commandLineLength * sizeof(WCHAR));
fResult = CreateProcessW(lpApplicationName,
nonConstCommandLine,
lpProcessAttributes,
lpThreadAttributes,
bInheritHandles,
dwCreationFlags,
lpEnvironment,
(LPWSTR)lpCurrentDirectory,
lpStartupInfo,
lpProcessInformation);
// At the end of the current scope, the last error code will be overwritten by the destructor of
// NewArrayHolder. So we save the error code here, and restore it after the end of the current scope.
err = GetLastError();
}
SetLastError(err);
return fResult;
}
#ifndef HOST_UNIX
#include "psapi.h"
#include "tlhelp32.h"
#include "winnls.h"
//********** Globals. *********************************************************
bool g_fEnsureCharSetInfoInitialized = FALSE; // true if we've detected the platform's character set characteristics
// Detect Unicode support of the operating system, and initialize globals
void EnsureCharSetInfoInitialized()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
if (!g_fEnsureCharSetInfoInitialized)
{
// NOTE: Do not use any of the Wsz* wrapper functions right now. They will have
// problems.
// Per Shupak, you're supposed to get the maximum size of a DBCS char
// dynamically to work properly on all locales (bug 2757).
CPINFO cpInfo;
if (GetCPInfo(CP_ACP, &cpInfo))
g_dwMaxDBCSCharByteSize = cpInfo.MaxCharSize;
else
g_dwMaxDBCSCharByteSize = 2;
VolatileStore(&g_fEnsureCharSetInfoInitialized, true);
}
return;
}
typedef HRESULT(WINAPI *pfnSetThreadDescription)(HANDLE hThread, PCWSTR lpThreadDescription);
extern pfnSetThreadDescription g_pfnSetThreadDescription;
// Dummy method if windows version does not support it
HRESULT SetThreadDescriptionDummy(HANDLE hThread, PCWSTR lpThreadDescription)
{
return NOERROR;
}
HRESULT WINAPI InitializeSetThreadDescription(HANDLE hThread, PCWSTR lpThreadDescription)
{
HMODULE hKernel32 = WszLoadLibrary(W("kernel32.dll"));
pfnSetThreadDescription pLocal = NULL;
if (hKernel32 != NULL)
{
// store to thread local variable to prevent data race
pLocal = (pfnSetThreadDescription)GetProcAddress(hKernel32, "SetThreadDescription");
}
if (pLocal == NULL) // method is only available with Windows 10 Creators Update or later
{
g_pfnSetThreadDescription = SetThreadDescriptionDummy;
}
else
{
g_pfnSetThreadDescription = pLocal;
}
return g_pfnSetThreadDescription(hThread, lpThreadDescription);
}
pfnSetThreadDescription g_pfnSetThreadDescription = &InitializeSetThreadDescription;
// Set the unmanaged thread name, which will show up in ETW and in debuggers that know how to read this data.
HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription)
{
return g_pfnSetThreadDescription(hThread, lpThreadDescription);
}
#else //!HOST_UNIX
HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription)
{
return SetThreadDescription(hThread, lpThreadDescription);
}
#endif //!HOST_UNIX
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// WinWrap.cpp
//
//
// This file contains wrapper functions for Win32 API's that take strings.
//
// COM+ internally uses UNICODE as the internal state and string format. This
// file will undef the mapping macros so that one cannot mistakenly call a
// method that isn't going to work. Instead, you have to call the correct
// wrapper API.
//
//*****************************************************************************
#include "stdafx.h" // Precompiled header key.
#include "winwrap.h" // Header for macros and functions.
#include "utilcode.h"
#include "holder.h"
#include "pedecoder.h"
// ====== READ BEFORE ADDING CONTRACTS ==================================================
// The functions in this file propagate SetLastError codes to their callers.
// Contracts are not guaranteed to preserve these codes (and no, we're not taking
// the overhead hit to make them do so. Don't bother asking.)
//
// Most of the wrappers have a contract of the form:
//
// NOTHROW;
// INJECT_FAULT(xxx);
//
// For such functions, use the special purpose construct:
//
// WINWRAPPER_NO_CONTRACT(xxx);
//
// For everything else, use STATIC_CONTRACT.
//
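// (For instance, WszCreateProcess below opens with
//     WINWRAPPER_NO_CONTRACT(SetLastError(ERROR_OUTOFMEMORY); return 0;);
// i.e. the injected-fault statement documents that on fault the wrapper fails with ERROR_OUTOFMEMORY.)
//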
#undef CONTRACT
#define CONTRACT $$$$$$$$READ_COMMENT_IN_WINFIX_CPP$$$$$$$$$$
#undef CONTRACTL
#define CONTRACTL $$$$$$$$READ_COMMENT_IN_WINFIX_CPP$$$$$$$$$$
#ifdef ENABLE_CONTRACTS_IMPL
static BOOL gWinWrapperContractRecursionBreak = FALSE;
class WinWrapperContract
{
public:
WinWrapperContract(const char *szFunction, const char *szFile, int lineNum)
{
CANNOT_HAVE_CONTRACT;
m_pClrDebugState = NULL;
if (gWinWrapperContractRecursionBreak)
{
return;
}
m_pClrDebugState = GetClrDebugState();
// Save old debug state
m_IncomingClrDebugState = *m_pClrDebugState;
m_pClrDebugState->ViolationMaskReset( ThrowsViolation );
if (m_pClrDebugState->IsFaultForbid() && !(m_pClrDebugState->ViolationMask() & (FaultViolation|FaultNotFatal|BadDebugState)))
{
gWinWrapperContractRecursionBreak = TRUE;
CONTRACT_ASSERT("INJECT_FAULT called in a FAULTFORBID region.",
Contract::FAULT_Forbid,
Contract::FAULT_Mask,
szFunction,
szFile,
lineNum
);
}
};
~WinWrapperContract()
{
CANNOT_HAVE_CONTRACT;
//!!!!!! THIS DESTRUCTOR MUST NOT CHANGE THE GETLASTERROR VALUE !!!!!!
// Backout all changes to debug state.
if (m_pClrDebugState != NULL)
{
*m_pClrDebugState = m_IncomingClrDebugState;
}
}
private:
ClrDebugState *m_pClrDebugState;
ClrDebugState m_IncomingClrDebugState;
};
#endif
#ifdef ENABLE_CONTRACTS_IMPL
#define WINWRAPPER_NO_CONTRACT(stmt) \
STATIC_CONTRACT_NOTHROW; \
STATIC_CONTRACT_FAULT; \
STATIC_CONTRACT_CANNOT_TAKE_LOCK; \
WinWrapperContract __wcontract(__FUNCTION__, __FILE__, __LINE__); \
if (0) {stmt} \
#define STATIC_WINWRAPPER_NO_CONTRACT(stmt) \
STATIC_CONTRACT_NOTHROW; \
STATIC_CONTRACT_CANNOT_TAKE_LOCK; \
STATIC_CONTRACT_FAULT; \
if (0) {stmt} \
#else
#define WINWRAPPER_NO_CONTRACT(stmt)
#define STATIC_WINWRAPPER_NO_CONTRACT(stmt)
#endif
ULONG g_dwMaxDBCSCharByteSize = 0;
// The only purpose of this function is to make a local copy of lpCommandLine.
// The Windows implementation of CreateProcessW can actually change lpCommandLine,
// but we'd like to keep it const.
BOOL
WszCreateProcess(
LPCWSTR lpApplicationName,
LPCWSTR lpCommandLine,
LPSECURITY_ATTRIBUTES lpProcessAttributes,
LPSECURITY_ATTRIBUTES lpThreadAttributes,
BOOL bInheritHandles,
DWORD dwCreationFlags,
LPVOID lpEnvironment,
LPCWSTR lpCurrentDirectory,
LPSTARTUPINFOW lpStartupInfo,
LPPROCESS_INFORMATION lpProcessInformation
)
{
WINWRAPPER_NO_CONTRACT(SetLastError(ERROR_OUTOFMEMORY); return 0;);
BOOL fResult;
DWORD err;
{
size_t commandLineLength = wcslen(lpCommandLine) + 1;
NewArrayHolder<WCHAR> nonConstCommandLine(new (nothrow) WCHAR[commandLineLength]);
if (nonConstCommandLine == NULL)
{
SetLastError(ERROR_OUTOFMEMORY);
return 0;
}
memcpy(nonConstCommandLine, lpCommandLine, commandLineLength * sizeof(WCHAR));
fResult = CreateProcessW(lpApplicationName,
nonConstCommandLine,
lpProcessAttributes,
lpThreadAttributes,
bInheritHandles,
dwCreationFlags,
lpEnvironment,
(LPWSTR)lpCurrentDirectory,
lpStartupInfo,
lpProcessInformation);
// At the end of the current scope, the last error code will be overwritten by the destructor of
// NewArrayHolder. So we save the error code here, and restore it after the end of the current scope.
err = GetLastError();
}
SetLastError(err);
return fResult;
}
#ifndef HOST_UNIX
#include "psapi.h"
#include "tlhelp32.h"
#include "winnls.h"
//********** Globals. *********************************************************
bool g_fEnsureCharSetInfoInitialized = FALSE; // true if we've detected the platform's character set characteristics
// Detect Unicode support of the operating system, and initialize globals
void EnsureCharSetInfoInitialized()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
if (!g_fEnsureCharSetInfoInitialized)
{
// NOTE: Do not use any of the Wsz* wrapper functions right now. They will have
// problems.
// Per Shupak, you're supposed to get the maximum size of a DBCS char
// dynamically to work properly on all locales (bug 2757).
CPINFO cpInfo;
if (GetCPInfo(CP_ACP, &cpInfo))
g_dwMaxDBCSCharByteSize = cpInfo.MaxCharSize;
else
g_dwMaxDBCSCharByteSize = 2;
VolatileStore(&g_fEnsureCharSetInfoInitialized, true);
}
return;
}
typedef HRESULT(WINAPI *pfnSetThreadDescription)(HANDLE hThread, PCWSTR lpThreadDescription);
extern pfnSetThreadDescription g_pfnSetThreadDescription;
// Dummy method used if the Windows version does not support SetThreadDescription
HRESULT SetThreadDescriptionDummy(HANDLE hThread, PCWSTR lpThreadDescription)
{
return NOERROR;
}
HRESULT WINAPI InitializeSetThreadDescription(HANDLE hThread, PCWSTR lpThreadDescription)
{
HMODULE hKernel32 = WszLoadLibrary(W("kernel32.dll"));
pfnSetThreadDescription pLocal = NULL;
if (hKernel32 != NULL)
{
        // store to a local variable first to prevent a data race on the global
pLocal = (pfnSetThreadDescription)GetProcAddress(hKernel32, "SetThreadDescription");
}
if (pLocal == NULL) // method is only available with Windows 10 Creators Update or later
{
g_pfnSetThreadDescription = SetThreadDescriptionDummy;
}
else
{
g_pfnSetThreadDescription = pLocal;
}
return g_pfnSetThreadDescription(hThread, lpThreadDescription);
}
pfnSetThreadDescription g_pfnSetThreadDescription = &InitializeSetThreadDescription;
// Set the unmanaged thread name, which will show up in ETW and in debuggers that know how to read this data.
HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription)
{
return g_pfnSetThreadDescription(hThread, lpThreadDescription);
}
#else //!HOST_UNIX
HRESULT SetThreadName(HANDLE hThread, PCWSTR lpThreadDescription)
{
return SetThreadDescription(hThread, lpThreadDescription);
}
#endif //!HOST_UNIX
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes; these end up being locations, always (well -- there is one case where that's not true: such a node can end up under a return of a small struct, later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling, everywhere in the code, the case where the layout is missing, which is exactly what we don't want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/vm/arm/stubs.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: stubs.cpp
//
// This file contains stub functions for unimplemented features needed to
// run on the ARM platform.
#include "common.h"
#include "jitinterface.h"
#include "comdelegate.h"
#include "invokeutil.h"
#include "excep.h"
#include "class.h"
#include "field.h"
#include "dllimportcallback.h"
#include "dllimport.h"
#include "eeconfig.h"
#include "cgensys.h"
#include "asmconstants.h"
#include "virtualcallstub.h"
#include "gcdump.h"
#include "rtlfunctions.h"
#include "codeman.h"
#include "ecall.h"
#include "threadsuspend.h"
// target write barriers
EXTERN_C void JIT_WriteBarrier(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_End();
EXTERN_C void JIT_CheckedWriteBarrier(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_End();
EXTERN_C void JIT_ByRefWriteBarrier_End();
EXTERN_C void JIT_ByRefWriteBarrier_SP(Object **dst, Object *ref);
// source write barriers
EXTERN_C void JIT_WriteBarrier_SP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_SP_Pre_End();
EXTERN_C void JIT_WriteBarrier_SP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_SP_Post_End();
EXTERN_C void JIT_WriteBarrier_MP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_MP_Pre_End();
EXTERN_C void JIT_WriteBarrier_MP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_MP_Post_End();
EXTERN_C void JIT_CheckedWriteBarrier_SP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_SP_Pre_End();
EXTERN_C void JIT_CheckedWriteBarrier_SP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_SP_Post_End();
EXTERN_C void JIT_CheckedWriteBarrier_MP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_MP_Pre_End();
EXTERN_C void JIT_CheckedWriteBarrier_MP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_MP_Post_End();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Pre();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Pre_End();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Post();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Post_End();
EXTERN_C void JIT_ByRefWriteBarrier_MP_Pre();
EXTERN_C void JIT_ByRefWriteBarrier_MP_Pre_End();
EXTERN_C void JIT_ByRefWriteBarrier_MP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_ByRefWriteBarrier_MP_Post_End();
EXTERN_C void JIT_PatchedWriteBarrierStart();
EXTERN_C void JIT_PatchedWriteBarrierLast();
#ifndef DACCESS_COMPILE
//-----------------------------------------------------------------------
// InstructionFormat for conditional jump.
//-----------------------------------------------------------------------
class ThumbCondJump : public InstructionFormat
{
public:
ThumbCondJump() : InstructionFormat(InstructionFormat::k16)
{
LIMITED_METHOD_CONTRACT;
}
virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16);
return 2;
}
virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16);
return 4;
}
//CB{N}Z Rn, <Label>
//Encoding 1|0|1|1|op|0|i|1|imm5|Rn
//op = Bit3(variation)
//Rn = Bits2-0(variation)
virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16);
if(fixedUpReference <0 || fixedUpReference > 126)
COMPlusThrow(kNotSupportedException);
_ASSERTE((fixedUpReference & 0x1) == 0);
pOutBufferRW[0] = static_cast<BYTE>(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
pOutBufferRW[1] = static_cast<BYTE>(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
}
};
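// Worked example (illustration only, nothing in this file emits these exact values): with
// variationCode = 0x2 (CBZ, Rn = r2) and fixedUpReference = 8, EmitInstruction above produces
// the bytes 0x22 0xB1, i.e. the Thumb halfword 0xB122, which is CBZ r2, <label> with the label
// 8 bytes past the hot spot (the instruction address + 4).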
//-----------------------------------------------------------------------
// InstructionFormat for near Jump and short Jump
//-----------------------------------------------------------------------
class ThumbNearJump : public InstructionFormat
{
public:
ThumbNearJump() : InstructionFormat(InstructionFormat::k16|InstructionFormat::k32)
{
LIMITED_METHOD_CONTRACT;
}
virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
if(refsize == InstructionFormat::k16)
return 2;
else if(refsize == InstructionFormat::k32)
return 4;
else
_ASSERTE(!"Unknown refsize");
return 0;
}
virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT cond, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(cond <15);
//offsets must be in multiples of 2
_ASSERTE((fixedUpReference & 0x1) == 0);
if(cond == 0xe) //Always execute
{
if(fixedUpReference >= -2048 && fixedUpReference <= 2046)
{
if(refsize != InstructionFormat::k16)
_ASSERTE(!"Expected refSize to be 2");
//Emit T2 encoding of B<c> <label> instruction
pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
pOutBufferRW[1] = static_cast<BYTE>(0xe0 | ((fixedUpReference & 0xe00)>>9));
}
else if(fixedUpReference >= -16777216 && fixedUpReference <= 16777214)
{
if(refsize != InstructionFormat::k32)
_ASSERTE(!"Expected refSize to be 4");
//Emit T4 encoding of B<c> <label> instruction
int s = (fixedUpReference & 0x1000000) >> 24;
int i1 = (fixedUpReference & 0x800000) >> 23;
int i2 = (fixedUpReference & 0x400000) >> 22;
pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0xff000) >> 12);
pOutBufferRW[1] = static_cast<BYTE>(0xf0 | (s << 2) |( (fixedUpReference & 0x300000) >>20));
pOutBufferRW[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
pOutBufferRW[3] = static_cast<BYTE>(0x90 | (~(i1^s)) << 5 | (~(i2^s)) << 3 | (fixedUpReference & 0xe00) >> 9);
}
else
{
COMPlusThrow(kNotSupportedException);
}
}
else // conditional branch based on flags
{
if(fixedUpReference >= -256 && fixedUpReference <= 254)
{
if(refsize != InstructionFormat::k16)
_ASSERTE(!"Expected refSize to be 2");
//Emit T1 encoding of B<c> <label> instruction
pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
pOutBufferRW[1] = static_cast<BYTE>(0xd0 | (cond & 0xf));
}
else if(fixedUpReference >= -1048576 && fixedUpReference <= 1048574)
{
if(refsize != InstructionFormat::k32)
_ASSERTE(!"Expected refSize to be 4");
//Emit T3 encoding of B<c> <label> instruction
pOutBufferRW[0] = static_cast<BYTE>(((cond & 0x3) << 6) | ((fixedUpReference & 0x3f000) >>12));
pOutBufferRW[1] = static_cast<BYTE>(0xf0 | ((fixedUpReference & 0x100000) >>18) | ((cond & 0xc) >> 2));
pOutBufferRW[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
pOutBufferRW[3] = static_cast<BYTE>(0x80 | ((fixedUpReference & 0x40000) >> 13) | ((fixedUpReference & 0x80000) >> 16) | ((fixedUpReference & 0xe00) >> 9));
}
else
{
COMPlusThrow(kNotSupportedException);
}
}
}
virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
{
LIMITED_METHOD_CONTRACT
if (fExternal)
{
_ASSERTE(0);
return FALSE;
}
else
{
switch (refsize)
{
case InstructionFormat::k16:
if(variationCode == 0xe)
return (offset >= -2048 && offset <= 2046 && (offset & 0x1) == 0);
else
return (offset >= -256 && offset <= 254 && (offset & 0x1) == 0);
case InstructionFormat::k32:
if(variationCode == 0xe)
return ((offset >= -16777216) && (offset <= 16777214) && ((offset & 0x1) == 0));
else
return ((offset >= -1048576) && (offset <= 1048574) && ((offset & 0x1) == 0));
default:
_ASSERTE(!"Unknown refsize");
return FALSE;
}
}
}
virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16 || refsize == InstructionFormat::k32);
return 4;
}
};
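// Worked example (illustration only): an unconditional branch (cond == 0xe) with
// fixedUpReference = 100 takes the 16-bit T2 path above and produces the bytes 0x32 0xE0,
// i.e. the halfword 0xE032, which is B <label> with the label 100 bytes past the hot spot
// (the instruction address + 4).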
//static conditional jump instruction format object
static BYTE gThumbCondJump[sizeof(ThumbCondJump)];
//static near jump instruction format object
static BYTE gThumbNearJump[sizeof(ThumbNearJump)];
void StubLinkerCPU::Init(void)
{
//Initialize the object
new (gThumbCondJump) ThumbCondJump();
new (gThumbNearJump) ThumbNearJump();
}
// GC write barrier support.
//
// To optimize our write barriers we code the values of several GC globals (e.g. g_lowest_address) directly
// into the barrier function itself, thus avoiding a double memory indirection. Every time the GC modifies one
// of these globals we need to update all of the write barriers accordingly.
//
// In order to keep this process non-brittle we don't hard code the offsets of the instructions that need to
// be changed. Instead the code used to create these barriers is implemented using special macros that record
// the necessary offsets in a descriptor table. Search for "GC write barrier support" in vm\arm\asmhelpers.asm
// for more details.
// Structure describing the layout of a single write barrier descriptor. This must be kept in sync with the
// code in vm\arm\asmhelpers.asm in the WRITE_BARRIER_END macro. Each offset recorded is for one of the
// supported GC globals (an offset of 0xffff is encoded if that global is not used by the particular barrier
// function). We currently only support one usage of each global by any single barrier function. The offset is
// the byte offset from the start of the function at which a movw,movt instruction pair is used to load the
// value of the global into a register.
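// (Illustration, register chosen arbitrarily: such a pair looks like
//     movw r12, #0x5678
//     movt r12, #0x1234 ; r12 = 0x12345678
// and PutThumb2Mov32, used below, rewrites the two immediate fields of the pair in place.)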
struct WriteBarrierDescriptor
{
#ifdef TARGET_UNIX
DWORD m_funcStartOffset; // Offset to the start of the barrier function relative to this struct address
DWORD m_funcEndOffset; // Offset to the end of the barrier function relative to this struct address
#else // TARGET_UNIX
BYTE * m_pFuncStart; // Pointer to the start of the barrier function
BYTE * m_pFuncEnd; // Pointer to the end of the barrier function
#endif // TARGET_UNIX
DWORD m_dw_g_lowest_address_offset; // Offset of the instruction reading g_lowest_address
DWORD m_dw_g_highest_address_offset; // Offset of the instruction reading g_highest_address
DWORD m_dw_g_ephemeral_low_offset; // Offset of the instruction reading g_ephemeral_low
DWORD m_dw_g_ephemeral_high_offset; // Offset of the instruction reading g_ephemeral_high
DWORD m_dw_g_card_table_offset; // Offset of the instruction reading g_card_table
};
// Infrastructure used for mapping of the source and destination of current WB patching
struct WriteBarrierMapping
{
PBYTE to; // Pointer to the write-barrier where it was copied over
PBYTE from; // Pointer to write-barrier from which it was copied
};
const int WriteBarrierIndex = 0;
const int CheckedWriteBarrierIndex = 1;
const int ByRefWriteBarrierIndex = 2;
const int MaxWriteBarrierIndex = 3;
WriteBarrierMapping wbMapping[MaxWriteBarrierIndex] =
{
{(PBYTE)JIT_WriteBarrier, NULL},
{(PBYTE)JIT_CheckedWriteBarrier, NULL},
{(PBYTE)JIT_ByRefWriteBarrier, NULL}
};
PBYTE FindWBMapping(PBYTE from)
{
for(int i = 0; i < MaxWriteBarrierIndex; ++i)
{
if(wbMapping[i].from == from)
return wbMapping[i].to;
}
return NULL;
}
// Pointer to the start of the descriptor table. The end of the table is marked by a sentinel entry
// (m_pFuncStart is NULL).
EXTERN_C WriteBarrierDescriptor g_rgWriteBarrierDescriptors;
// Determine the range of memory containing all the write barrier implementations (these are clustered
// together and should fit in a page or maybe two).
void ComputeWriteBarrierRange(BYTE ** ppbStart, DWORD * pcbLength)
{
DWORD size = (PBYTE)JIT_PatchedWriteBarrierLast - (PBYTE)JIT_PatchedWriteBarrierStart;
*ppbStart = (PBYTE)JIT_PatchedWriteBarrierStart;
if (IsWriteBarrierCopyEnabled())
{
*ppbStart = GetWriteBarrierCodeLocation(*ppbStart);
}
*pcbLength = size;
}
void CopyWriteBarrier(PCODE dstCode, PCODE srcCode, PCODE endCode)
{
TADDR dst = (TADDR)PCODEToPINSTR((PCODE)GetWriteBarrierCodeLocation((void*)dstCode));
TADDR src = PCODEToPINSTR(srcCode);
TADDR end = PCODEToPINSTR(endCode);
size_t size = (PBYTE)end - (PBYTE)src;
ExecutableWriterHolderNoLog<void> writeBarrierWriterHolder;
if (IsWriteBarrierCopyEnabled())
{
writeBarrierWriterHolder.AssignExecutableWriterHolder((void*)dst, size);
dst = (TADDR)writeBarrierWriterHolder.GetRW();
}
memcpy((PVOID)dst, (PVOID)src, size);
}
#if _DEBUG
void ValidateWriteBarriers()
{
    // Post-grow WBs are bigger than pre-grow ones, so validate that the target WB has space to accommodate them
_ASSERTE( ((PBYTE)JIT_WriteBarrier_End - (PBYTE)JIT_WriteBarrier) >= ((PBYTE)JIT_WriteBarrier_MP_Post_End - (PBYTE)JIT_WriteBarrier_MP_Post));
_ASSERTE( ((PBYTE)JIT_WriteBarrier_End - (PBYTE)JIT_WriteBarrier) >= ((PBYTE)JIT_WriteBarrier_SP_Post_End - (PBYTE)JIT_WriteBarrier_SP_Post));
_ASSERTE( ((PBYTE)JIT_CheckedWriteBarrier_End - (PBYTE)JIT_CheckedWriteBarrier) >= ((PBYTE)JIT_CheckedWriteBarrier_MP_Post_End - (PBYTE)JIT_CheckedWriteBarrier_MP_Post));
_ASSERTE( ((PBYTE)JIT_CheckedWriteBarrier_End - (PBYTE)JIT_CheckedWriteBarrier) >= ((PBYTE)JIT_CheckedWriteBarrier_SP_Post_End - (PBYTE)JIT_CheckedWriteBarrier_SP_Post));
_ASSERTE( ((PBYTE)JIT_ByRefWriteBarrier_End - (PBYTE)JIT_ByRefWriteBarrier) >= ((PBYTE)JIT_ByRefWriteBarrier_MP_Post_End - (PBYTE)JIT_ByRefWriteBarrier_MP_Post));
_ASSERTE( ((PBYTE)JIT_ByRefWriteBarrier_End - (PBYTE)JIT_ByRefWriteBarrier) >= ((PBYTE)JIT_ByRefWriteBarrier_SP_Post_End - (PBYTE)JIT_ByRefWriteBarrier_SP_Post));
}
#endif // _DEBUG
#define UPDATE_WB(_proc,_grow) \
CopyWriteBarrier((PCODE)JIT_WriteBarrier, (PCODE)JIT_WriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_WriteBarrier_ ## _proc ## _ ## _grow ## _End); \
wbMapping[WriteBarrierIndex].from = (PBYTE)JIT_WriteBarrier_ ## _proc ## _ ## _grow ; \
\
CopyWriteBarrier((PCODE)JIT_CheckedWriteBarrier, (PCODE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow ## _End); \
wbMapping[CheckedWriteBarrierIndex].from = (PBYTE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow ; \
\
CopyWriteBarrier((PCODE)JIT_ByRefWriteBarrier, (PCODE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow ## _End); \
wbMapping[ByRefWriteBarrierIndex].from = (PBYTE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow ; \
// Update the instructions in our various write barrier implementations that refer directly to the values
// of GC globals such as g_lowest_address and g_card_table. We don't particularly care which values have
// changed on each of these callbacks; it's pretty cheap to refresh them all.
void UpdateGCWriteBarriers(bool postGrow = false)
{
// Define a helper macro that abstracts the minutia of patching the instructions to access the value of a
// particular GC global.
#if _DEBUG
ValidateWriteBarriers();
#endif // _DEBUG
static bool wbCopyRequired = true; // We begin with a wb copy
static bool wbIsPostGrow = false; // We begin with pre-Grow write barrier
if(postGrow && !wbIsPostGrow)
{
wbIsPostGrow = true;
wbCopyRequired = true;
}
if(wbCopyRequired)
{
BOOL mp = g_SystemInfo.dwNumberOfProcessors > 1;
if(mp)
{
if(wbIsPostGrow)
{
UPDATE_WB(MP,Post);
}
else
{
UPDATE_WB(MP,Pre);
}
}
else
{
if(wbIsPostGrow)
{
UPDATE_WB(SP,Post);
}
else
{
UPDATE_WB(SP,Pre);
}
}
wbCopyRequired = false;
}
#define GWB_PATCH_OFFSET(_global) \
if (pDesc->m_dw_##_global##_offset != 0xffff) \
PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_##_global##_offset), (UINT32)(dac_cast<TADDR>(_global)));
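    // For example, GWB_PATCH_OFFSET(g_card_table) expands (after token pasting) to:
    //     if (pDesc->m_dw_g_card_table_offset != 0xffff)
    //         PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_g_card_table_offset), (UINT32)(dac_cast<TADDR>(g_card_table)));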
// Iterate through the write barrier patch table created in the .clrwb section
// (see write barrier asm code)
WriteBarrierDescriptor * pDesc = &g_rgWriteBarrierDescriptors;
#ifdef TARGET_UNIX
while (pDesc->m_funcStartOffset)
#else // TARGET_UNIX
while (pDesc->m_pFuncStart)
#endif // TARGET_UNIX
{
        // If the write barrier is currently being used (i.e., copied over to the patchable site)
// then read the patch location from the table and use the offset to patch the target asm code
#ifdef TARGET_UNIX
PBYTE to = FindWBMapping((BYTE *)pDesc + pDesc->m_funcStartOffset);
size_t barrierSize = pDesc->m_funcEndOffset - pDesc->m_funcStartOffset;
#else // TARGET_UNIX
PBYTE to = FindWBMapping(pDesc->m_pFuncStart);
size_t barrierSize = pDesc->m_pFuncEnd - pDesc->m_pFuncStart;
#endif // TARGET_UNIX
if(to)
{
to = (PBYTE)PCODEToPINSTR((PCODE)GetWriteBarrierCodeLocation(to));
ExecutableWriterHolderNoLog<BYTE> barrierWriterHolder;
if (IsWriteBarrierCopyEnabled())
{
barrierWriterHolder.AssignExecutableWriterHolder(to, barrierSize);
to = barrierWriterHolder.GetRW();
}
GWB_PATCH_OFFSET(g_lowest_address);
GWB_PATCH_OFFSET(g_highest_address);
GWB_PATCH_OFFSET(g_ephemeral_low);
GWB_PATCH_OFFSET(g_ephemeral_high);
GWB_PATCH_OFFSET(g_card_table);
}
pDesc++;
}
}
int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
{
// The runtime is not always suspended when this is called (unlike StompWriteBarrierEphemeral) but we have
// no way to update the barrier code atomically on ARM since each 32-bit value we change is loaded over
// two instructions. So we have to suspend the EE (which forces code out of the barrier functions) before
// proceeding. Luckily the case where the runtime is not already suspended is relatively rare (allocation
// of a new large object heap segment). Skip the suspend for the case where we're called during runtime
// startup.
    // Suspending/resuming the EE under GC stress will trigger a GC, and if we're holding the
    // GC lock due to allocating a LOH segment it will cause a deadlock, so disable GC stress here.
GCStressPolicy::InhibitHolder iholder;
int stompWBCompleteActions = SWB_ICACHE_FLUSH;
if (!isRuntimeSuspended)
{
ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
stompWBCompleteActions |= SWB_EE_RESTART;
}
UpdateGCWriteBarriers(bReqUpperBoundsCheck);
return stompWBCompleteActions;
}
int StompWriteBarrierEphemeral(bool isRuntimeSuspended)
{
UNREFERENCED_PARAMETER(isRuntimeSuspended);
_ASSERTE(isRuntimeSuspended);
UpdateGCWriteBarriers();
return SWB_ICACHE_FLUSH;
}
void FlushWriteBarrierInstructionCache()
{
// We've changed code so we must flush the instruction cache.
BYTE *pbAlteredRange;
DWORD cbAlteredRange;
ComputeWriteBarrierRange(&pbAlteredRange, &cbAlteredRange);
FlushInstructionCache(GetCurrentProcess(), pbAlteredRange, cbAlteredRange);
}
#endif // !DACCESS_COMPILE
void LazyMachState::unwindLazyState(LazyMachState* baseState,
MachState* unwoundstate,
DWORD threadId,
int funCallDepth,
HostCallPreference hostCallPreference)
{
T_CONTEXT ctx;
T_KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs;
ctx.ContextFlags = 0; // Read by PAL_VirtualUnwind.
ctx.Pc = baseState->captureIp;
ctx.Sp = baseState->captureSp;
ctx.R4 = unwoundstate->captureR4_R11[0] = baseState->captureR4_R11[0];
ctx.R5 = unwoundstate->captureR4_R11[1] = baseState->captureR4_R11[1];
ctx.R6 = unwoundstate->captureR4_R11[2] = baseState->captureR4_R11[2];
ctx.R7 = unwoundstate->captureR4_R11[3] = baseState->captureR4_R11[3];
ctx.R8 = unwoundstate->captureR4_R11[4] = baseState->captureR4_R11[4];
ctx.R9 = unwoundstate->captureR4_R11[5] = baseState->captureR4_R11[5];
ctx.R10 = unwoundstate->captureR4_R11[6] = baseState->captureR4_R11[6];
ctx.R11 = unwoundstate->captureR4_R11[7] = baseState->captureR4_R11[7];
#if !defined(DACCESS_COMPILE)
// For DAC, if we get here, it means that the LazyMachState is uninitialized and we have to unwind it.
// The API we use to unwind in DAC is StackWalk64(), which does not support the context pointers.
//
// Restore the integer registers to KNONVOLATILE_CONTEXT_POINTERS to be used for unwinding.
nonVolRegPtrs.R4 = &unwoundstate->captureR4_R11[0];
nonVolRegPtrs.R5 = &unwoundstate->captureR4_R11[1];
nonVolRegPtrs.R6 = &unwoundstate->captureR4_R11[2];
nonVolRegPtrs.R7 = &unwoundstate->captureR4_R11[3];
nonVolRegPtrs.R8 = &unwoundstate->captureR4_R11[4];
nonVolRegPtrs.R9 = &unwoundstate->captureR4_R11[5];
nonVolRegPtrs.R10 = &unwoundstate->captureR4_R11[6];
nonVolRegPtrs.R11 = &unwoundstate->captureR4_R11[7];
#endif // DACCESS_COMPILE
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->captureIp, baseState->captureSp));
PCODE pvControlPc;
do
{
#ifndef TARGET_UNIX
pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs);
#else // !TARGET_UNIX
#ifdef DACCESS_COMPILE
HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs);
if (FAILED(hr))
{
DacError(hr);
}
#else // DACCESS_COMPILE
BOOL success = PAL_VirtualUnwind(&ctx, &nonVolRegPtrs);
if (!success)
{
_ASSERTE(!"unwindLazyState: Unwinding failed");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
#endif // DACCESS_COMPILE
pvControlPc = GetIP(&ctx);
#endif // !TARGET_UNIX
if (funCallDepth > 0)
{
--funCallDepth;
if (funCallDepth == 0)
break;
}
else
{
            // Determine whether the given IP resides in JITted code (IsManagedCode returns nonzero in that case).
// Use it now to see if we've unwound to managed code yet.
BOOL fFailedReaderLock = FALSE;
BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
if (fFailedReaderLock)
{
// We don't know if we would have been able to find a JIT
// manager, because we couldn't enter the reader lock without
// yielding (and our caller doesn't want us to yield). So abort
// now.
// Invalidate the lazyState we're returning, so the caller knows
// we aborted before we could fully unwind
unwoundstate->_isValid = false;
return;
}
if (fIsManagedCode)
break;
}
}
while(TRUE);
//
// Update unwoundState so that HelperMethodFrameRestoreState knows which
// registers have been potentially modified.
//
unwoundstate->_pc = ctx.Pc;
unwoundstate->_sp = ctx.Sp;
#ifdef DACCESS_COMPILE
    // For DAC builds, we update the registers directly since we don't have context pointers
unwoundstate->captureR4_R11[0] = ctx.R4;
unwoundstate->captureR4_R11[1] = ctx.R5;
unwoundstate->captureR4_R11[2] = ctx.R6;
unwoundstate->captureR4_R11[3] = ctx.R7;
unwoundstate->captureR4_R11[4] = ctx.R8;
unwoundstate->captureR4_R11[5] = ctx.R9;
unwoundstate->captureR4_R11[6] = ctx.R10;
unwoundstate->captureR4_R11[7] = ctx.R11;
#else // !DACCESS_COMPILE
// For non-DAC builds, update the register state from context pointers
unwoundstate->_R4_R11[0] = (PDWORD)nonVolRegPtrs.R4;
unwoundstate->_R4_R11[1] = (PDWORD)nonVolRegPtrs.R5;
unwoundstate->_R4_R11[2] = (PDWORD)nonVolRegPtrs.R6;
unwoundstate->_R4_R11[3] = (PDWORD)nonVolRegPtrs.R7;
unwoundstate->_R4_R11[4] = (PDWORD)nonVolRegPtrs.R8;
unwoundstate->_R4_R11[5] = (PDWORD)nonVolRegPtrs.R9;
unwoundstate->_R4_R11[6] = (PDWORD)nonVolRegPtrs.R10;
unwoundstate->_R4_R11[7] = (PDWORD)nonVolRegPtrs.R11;
#endif // DACCESS_COMPILE
unwoundstate->_isValid = true;
}
void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
//
// Copy the saved state from the frame to the current context.
//
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState._pc, m_MachState._sp));
#if defined(DACCESS_COMPILE)
// For DAC, we may get here when the HMF is still uninitialized.
// So we may need to unwind here.
if (!m_MachState.isValid())
{
// This allocation throws on OOM.
MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true);
InsureInit(false, pUnwoundState);
pRD->pCurrentContext->Pc = pRD->ControlPC = pUnwoundState->_pc;
pRD->pCurrentContext->Sp = pRD->SP = pUnwoundState->_sp;
pRD->pCurrentContext->R4 = (DWORD)(pUnwoundState->captureR4_R11[0]);
pRD->pCurrentContext->R5 = (DWORD)(pUnwoundState->captureR4_R11[1]);
pRD->pCurrentContext->R6 = (DWORD)(pUnwoundState->captureR4_R11[2]);
pRD->pCurrentContext->R7 = (DWORD)(pUnwoundState->captureR4_R11[3]);
pRD->pCurrentContext->R8 = (DWORD)(pUnwoundState->captureR4_R11[4]);
pRD->pCurrentContext->R9 = (DWORD)(pUnwoundState->captureR4_R11[5]);
pRD->pCurrentContext->R10 = (DWORD)(pUnwoundState->captureR4_R11[6]);
pRD->pCurrentContext->R11 = (DWORD)(pUnwoundState->captureR4_R11[7]);
return;
}
#endif // DACCESS_COMPILE
// reset pContext; it's only valid for active (top-most) frame
pRD->pContext = NULL;
pRD->ControlPC = GetReturnAddress();
pRD->SP = (DWORD)(size_t)m_MachState._sp;
pRD->pCurrentContext->Pc = pRD->ControlPC;
pRD->pCurrentContext->Sp = pRD->SP;
pRD->pCurrentContext->R4 = *m_MachState._R4_R11[0];
pRD->pCurrentContext->R5 = *m_MachState._R4_R11[1];
pRD->pCurrentContext->R6 = *m_MachState._R4_R11[2];
pRD->pCurrentContext->R7 = *m_MachState._R4_R11[3];
pRD->pCurrentContext->R8 = *m_MachState._R4_R11[4];
pRD->pCurrentContext->R9 = *m_MachState._R4_R11[5];
pRD->pCurrentContext->R10 = *m_MachState._R4_R11[6];
pRD->pCurrentContext->R11 = *m_MachState._R4_R11[7];
pRD->pCurrentContextPointers->R4 = m_MachState._R4_R11[0];
pRD->pCurrentContextPointers->R5 = m_MachState._R4_R11[1];
pRD->pCurrentContextPointers->R6 = m_MachState._R4_R11[2];
pRD->pCurrentContextPointers->R7 = m_MachState._R4_R11[3];
pRD->pCurrentContextPointers->R8 = m_MachState._R4_R11[4];
pRD->pCurrentContextPointers->R9 = m_MachState._R4_R11[5];
pRD->pCurrentContextPointers->R10 = m_MachState._R4_R11[6];
pRD->pCurrentContextPointers->R11 = m_MachState._R4_R11[7];
pRD->pCurrentContextPointers->Lr = NULL;
}
#ifndef DACCESS_COMPILE
void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
int n = 0;
m_rgCode[n++] = 0x4684; // mov r12, r0
m_rgCode[n++] = 0x4608; // mov r0, r1
m_rgCode[n++] = 0xea4f; // mov r1, r12
m_rgCode[n++] = 0x010c;
m_rgCode[n++] = 0xf8df; // ldr pc, [pc, #0]
m_rgCode[n++] = 0xf000;
_ASSERTE(n == ARRAY_SIZE(m_rgCode));
m_pTarget = GetPreStubEntryPoint();
m_pMethodDesc = (TADDR)pMD;
}
/*
Rough pseudo-code of interface dispatching:
// jitted code sets r0, r4:
r0 = object;
r4 = indirectionCell;
// jitted code calls *indirectionCell
switch (*indirectionCell)
{
case LookupHolder._stub:
// ResolveWorkerAsmStub:
*indirectionCell = DispatchHolder._stub;
call ResolveWorkerStatic, jump to target method;
case DispatchHolder._stub:
if (r0.methodTable == expectedMethodTable) jump to target method;
// ResolveHolder._stub._failEntryPoint:
jump to case ResolveHolder._stub._resolveEntryPoint;
case ResolveHolder._stub._resolveEntryPoint:
if (r0.methodTable in hashTable) jump to target method;
// ResolveHolder._stub._slowEntryPoint:
// ResolveWorkerChainLookupAsmStub:
// ResolveWorkerAsmStub:
if (_failEntryPoint called too many times) *indirectionCell = ResolveHolder._stub._resolveEntryPoint;
call ResolveWorkerStatic, jump to target method;
}
Note that ResolveWorkerChainLookupAsmStub currently points directly
to ResolveWorkerAsmStub; in the future, this could be separate.
*/
void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
// Called directly by JITTED code
// See ResolveWorkerAsmStub
// ldr r12, [pc + 8] ; #_token
_stub._entryPoint[0] = 0xf8df;
_stub._entryPoint[1] = 0xc008;
// ldr pc, [pc] ; #_resolveWorkerTarget
_stub._entryPoint[2] = 0xf8df;
_stub._entryPoint[3] = 0xf000;
_stub._resolveWorkerTarget = resolveWorkerTarget;
_stub._token = dispatchToken;
_ASSERTE(4 == LookupStub::entryPointLen);
}
void DispatchHolder::Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
// Called directly by JITTED code
// DispatchHolder._stub._entryPoint(r0:object, r1, r2, r3, r4:IndirectionCell)
// {
// if (r0.methodTable == this._expectedMT) (this._implTarget)(r0, r1, r2, r3);
// else (this._failTarget)(r0, r1, r2, r3, r4);
// }
int n = 0;
WORD offset;
// We rely on the stub entry-point being DWORD aligned (so we can tell whether any subsequent WORD is
// DWORD-aligned or not, which matters in the calculation of PC-relative offsets).
_ASSERTE(((UINT_PTR)_stub._entryPoint & 0x3) == 0);
// Compute a PC-relative offset for use in an instruction encoding. Must call this prior to emitting the
// instruction halfword to which it applies. For thumb-2 encodings the offset must be computed before emitting
// the first of the halfwords.
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(DispatchStub, _field) - ((offsetof(DispatchStub, _entryPoint) + sizeof(*DispatchStub::_entryPoint) * (n + 2)) & 0xfffffffc))
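    // (For context: a Thumb-2 PC-relative literal load uses Align(PC, 4) as its base, where PC is the
    // address of the instruction plus 4; the "+ sizeof(...) * (n + 2)) & 0xfffffffc" term above models
    // exactly that for the instruction about to be emitted at halfword index n.)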
    // r0 : object. It can be null as well; when it is null the code causes an AV. The AV is seen by the VM's
    // personality routine, which converts it into a NullReferenceException. We want the AV to happen before
    // modifying the stack so that we can get the call stack in windbg at the point of the AV. Therefore
    // "ldr r12, [r0]" should be the first instruction.
// ldr r12, [r0 + #Object.m_pMethTab]
_stub._entryPoint[n++] = DISPATCH_STUB_FIRST_WORD;
_stub._entryPoint[n++] = 0xc000;
// push {r5}
_stub._entryPoint[n++] = 0xb420;
// ldr r5, [pc + #_expectedMT]
offset = PC_REL_OFFSET(_expectedMT);
_ASSERTE((offset & 0x3) == 0);
_stub._entryPoint[n++] = 0x4d00 | (offset >> 2);
// cmp r5, r12
_stub._entryPoint[n++] = 0x4565;
// pop {r5}
_stub._entryPoint[n++] = 0xbc20;
// bne failTarget
_stub._entryPoint[n++] = 0xd101;
// ldr pc, [pc + #_implTarget]
offset = PC_REL_OFFSET(_implTarget);
_stub._entryPoint[n++] = 0xf8df;
_stub._entryPoint[n++] = 0xf000 | offset;
// failTarget:
// ldr pc, [pc + #_failTarget]
offset = PC_REL_OFFSET(_failTarget);
_stub._entryPoint[n++] = 0xf8df;
_stub._entryPoint[n++] = 0xf000 | offset;
// nop - insert padding
_stub._entryPoint[n++] = 0xbf00;
_ASSERTE(n == DispatchStub::entryPointLen);
// Make sure that the data members below are aligned
_ASSERTE((n & 1) == 0);
_stub._expectedMT = DWORD(expectedMT);
_stub._failTarget = failTarget;
_stub._implTarget = implTarget;
}
void ResolveHolder::Initialize(ResolveHolder* pResolveHolderRX,
PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr)
{
// Called directly by JITTED code
// ResolveStub._resolveEntryPoint(r0:Object*, r1, r2, r3, r4:IndirectionCellAndFlags)
// {
// MethodTable mt = r0.m_pMethTab;
// int i = ((mt + mt >> 12) ^ this._hashedToken) & this._cacheMask
// ResolveCacheElem e = this._cacheAddress + i
// do
// {
// if (mt == e.pMT && this._token == e.token) (e.target)(r0, r1, r2, r3);
// e = e.pNext;
// } while (e != null)
// (this._slowEntryPoint)(r0, r1, r2, r3, r4);
// }
//
int n = 0;
WORD offset;
// We rely on the stub entry-point being DWORD aligned (so we can tell whether any subsequent WORD is
// DWORD-aligned or not, which matters in the calculation of PC-relative offsets).
_ASSERTE(((UINT_PTR)_stub._resolveEntryPoint & 0x3) == 0);
// Compute a PC-relative offset for use in an instruction encoding. Must call this prior to emitting the
// instruction halfword to which it applies. For thumb-2 encodings the offset must be computed before emitting
// the first of the halfwords.
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - ((offsetof(ResolveStub, _resolveEntryPoint) + sizeof(*ResolveStub::_resolveEntryPoint) * (n + 2)) & 0xfffffffc))
// ldr r12, [r0 + #Object.m_pMethTab]
_stub._resolveEntryPoint[n++] = RESOLVE_STUB_FIRST_WORD;
_stub._resolveEntryPoint[n++] = 0xc000;
// ;; We need two scratch registers, r5 and r6
// push {r5,r6}
_stub._resolveEntryPoint[n++] = 0xb460;
// ;; Compute i = ((mt + mt >> 12) ^ this._hashedToken) & this._cacheMask
// add r6, r12, r12 lsr #12
_stub._resolveEntryPoint[n++] = 0xeb0c;
_stub._resolveEntryPoint[n++] = 0x361c;
// ldr r5, [pc + #_hashedToken]
offset = PC_REL_OFFSET(_hashedToken);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
// eor r6, r6, r5
_stub._resolveEntryPoint[n++] = 0xea86;
_stub._resolveEntryPoint[n++] = 0x0605;
// ldr r5, [pc + #_cacheMask]
offset = PC_REL_OFFSET(_cacheMask);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
// and r6, r6, r5
_stub._resolveEntryPoint[n++] = 0xea06;
_stub._resolveEntryPoint[n++] = 0x0605;
// ;; ResolveCacheElem e = this._cacheAddress + i
// ldr r5, [pc + #_cacheAddress]
offset = PC_REL_OFFSET(_cacheAddress);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
// ldr r6, [r5 + r6] ;; r6 = e = this._cacheAddress + i
_stub._resolveEntryPoint[n++] = 0x59ae;
// ;; do {
int loop = n;
// ;; Check mt == e.pMT
// ldr r5, [r6 + #ResolveCacheElem.pMT]
offset = offsetof(ResolveCacheElem, pMT);
_ASSERTE(offset <= 124 && (offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x6835 | (offset<< 4);
// cmp r12, r5
_stub._resolveEntryPoint[n++] = 0x45ac;
// bne nextEntry
_stub._resolveEntryPoint[n++] = 0xd108;
// ;; Check this._token == e.token
// ldr r5, [pc + #_token]
offset = PC_REL_OFFSET(_token);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset>>2);
// ldr r12, [r6 + #ResolveCacheElem.token]
offset = offsetof(ResolveCacheElem, token);
_stub._resolveEntryPoint[n++] = 0xf8d6;
_stub._resolveEntryPoint[n++] = 0xc000 | offset;
// cmp r12, r5
_stub._resolveEntryPoint[n++] = 0x45ac;
// bne nextEntry
_stub._resolveEntryPoint[n++] = 0xd103;
// ldr r12, [r6 + #ResolveCacheElem.target] ;; r12 : e.target
offset = offsetof(ResolveCacheElem, target);
_stub._resolveEntryPoint[n++] = 0xf8d6;
_stub._resolveEntryPoint[n++] = 0xc000 | offset;
// ;; Restore r5 and r6
// pop {r5,r6}
_stub._resolveEntryPoint[n++] = 0xbc60;
// ;; Branch to e.target
// bx r12 ;; (e.target)(r0,r1,r2,r3)
_stub._resolveEntryPoint[n++] = 0x4760;
// nextEntry:
// ;; e = e.pNext;
// ldr r6, [r6 + #ResolveCacheElem.pNext]
offset = offsetof(ResolveCacheElem, pNext);
_ASSERTE(offset <=124 && (offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x6836 | (offset << 4);
// ;; } while(e != null);
// cbz r6, slowEntryPoint
_stub._resolveEntryPoint[n++] = 0xb116;
// ldr r12, [r0 + #Object.m_pMethTab]
_stub._resolveEntryPoint[n++] = 0xf8d0;
_stub._resolveEntryPoint[n++] = 0xc000;
// b loop
offset = (WORD)((loop - (n + 2)) * sizeof(WORD));
offset = (offset >> 1) & 0x07ff;
_stub._resolveEntryPoint[n++] = 0xe000 | offset;
// slowEntryPoint:
// pop {r5,r6}
_stub._resolveEntryPoint[n++] = 0xbc60;
// nop for alignment
_stub._resolveEntryPoint[n++] = 0xbf00;
    // The slow entry point must be DWORD-aligned (see the _ASSERTE below); insert nops if necessary.
// ARMSTUB TODO: promotion
// fall through to slow case
_ASSERTE(_stub._resolveEntryPoint + n == _stub._slowEntryPoint);
_ASSERTE(n == ResolveStub::resolveEntryPointLen);
// ResolveStub._slowEntryPoint(r0:MethodToken, r1, r2, r3, r4:IndirectionCellAndFlags)
// {
// r12 = this._tokenSlow;
// this._resolveWorkerTarget(r0, r1, r2, r3, r4, r12);
// }
// The following macro relies on this entry point being DWORD-aligned. We've already asserted that the
    // overall stub is aligned above; we just need to check that the preceding stubs occupy an even number of
// WORD slots.
_ASSERTE((n & 1) == 0);
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - ((offsetof(ResolveStub, _slowEntryPoint) + sizeof(*ResolveStub::_slowEntryPoint) * (n + 2)) & 0xfffffffc))
n = 0;
// ldr r12, [pc + #_tokenSlow]
offset = PC_REL_OFFSET(_tokenSlow);
_stub._slowEntryPoint[n++] = 0xf8df;
_stub._slowEntryPoint[n++] = 0xc000 | offset;
// ldr pc, [pc + #_resolveWorkerTarget]
offset = PC_REL_OFFSET(_resolveWorkerTarget);
_stub._slowEntryPoint[n++] = 0xf8df;
_stub._slowEntryPoint[n++] = 0xf000 | offset;
_ASSERTE(n == ResolveStub::slowEntryPointLen);
// ResolveStub._failEntryPoint(r0:MethodToken, r1, r2, r3, r4:IndirectionCellAndFlags)
// {
// if(--*(this._pCounter) < 0) r4 = r4 | SDF_ResolveBackPatch;
// this._resolveEntryPoint(r0, r1, r2, r3, r4);
// }
// The following macro relies on this entry point being DWORD-aligned. We've already asserted that the
    // overall stub is aligned above; we just need to check that the preceding stubs occupy an even number of
// WORD slots.
_ASSERTE((n & 1) == 0);
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - ((offsetof(ResolveStub, _failEntryPoint) + sizeof(*ResolveStub::_failEntryPoint) * (n + 2)) & 0xfffffffc))
n = 0;
// push {r5}
_stub._failEntryPoint[n++] = 0xb420;
// ldr r5, [pc + #_pCounter]
offset = PC_REL_OFFSET(_pCounter);
_ASSERTE((offset & 0x3) == 0);
_stub._failEntryPoint[n++] = 0x4d00 | (offset >>2);
// ldr r12, [r5]
_stub._failEntryPoint[n++] = 0xf8d5;
_stub._failEntryPoint[n++] = 0xc000;
// subs r12, r12, #1
_stub._failEntryPoint[n++] = 0xf1bc;
_stub._failEntryPoint[n++] = 0x0c01;
// str r12, [r5]
_stub._failEntryPoint[n++] = 0xf8c5;
_stub._failEntryPoint[n++] = 0xc000;
// pop {r5}
_stub._failEntryPoint[n++] = 0xbc20;
// bge resolveEntryPoint
_stub._failEntryPoint[n++] = 0xda01;
// or r4, r4, SDF_ResolveBackPatch
_ASSERTE(SDF_ResolveBackPatch < 256);
_stub._failEntryPoint[n++] = 0xf044;
_stub._failEntryPoint[n++] = 0x0400 | SDF_ResolveBackPatch;
// resolveEntryPoint:
// b _resolveEntryPoint
offset = (WORD)(offsetof(ResolveStub, _resolveEntryPoint) - (offsetof(ResolveStub, _failEntryPoint) + sizeof(*ResolveStub::_failEntryPoint) * (n + 2)));
_ASSERTE((offset & 1) == 0);
offset = (offset >> 1) & 0x07ff;
_stub._failEntryPoint[n++] = 0xe000 | offset;
// nop for alignment
_stub._failEntryPoint[n++] = 0xbf00;
_ASSERTE(n == ResolveStub::failEntryPointLen);
_stub._pCounter = counterAddr;
_stub._hashedToken = hashedToken << LOG2_PTRSIZE;
_stub._cacheAddress = (size_t) cacheAddr;
_stub._token = dispatchToken;
_stub._tokenSlow = dispatchToken;
_stub._resolveWorkerTarget = resolveWorkerTarget;
_stub._cacheMask = CALL_STUB_CACHE_MASK * sizeof(void*);
_ASSERTE(resolveWorkerTarget == (PCODE)ResolveWorkerChainLookupAsmStub);
_ASSERTE(patcherTarget == NULL);
}
Stub *GenerateInitPInvokeFrameHelper()
{
CONTRACT(Stub*)
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
CPUSTUBLINKER sl;
CPUSTUBLINKER *psl = &sl;
CORINFO_EE_INFO::InlinedCallFrameInfo FrameInfo;
InlinedCallFrame::GetEEInfo(&FrameInfo);
// R4 contains address of the frame on stack (the frame ptr, not its neg space)
unsigned negSpace = FrameInfo.offsetOfFrameVptr;
ThumbReg regFrame = ThumbReg(4);
ThumbReg regThread = ThumbReg(5);
ThumbReg regScratch = ThumbReg(6);
ThumbReg regR9 = ThumbReg(9);
#ifdef TARGET_UNIX
// Erect frame to perform call to GetThread
psl->ThumbEmitProlog(1, sizeof(ArgumentRegisters), FALSE); // Save r4 for aligned stack
    // Save argument registers around the GetThread call. Don't bother using ldm/stm since this is an inefficient path anyway.
for (int reg = 0; reg < 4; reg++)
psl->ThumbEmitStoreRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r) + sizeof(*ArgumentRegisters::r) * reg);
#endif
psl->ThumbEmitGetThread(regThread);
#ifdef TARGET_UNIX
for (int reg = 0; reg < 4; reg++)
psl->ThumbEmitLoadRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r) + sizeof(*ArgumentRegisters::r) * reg);
#endif
// mov [regFrame + FrameInfo.offsetOfGSCookie], GetProcessGSCookie()
psl->ThumbEmitMovConstant(regScratch, GetProcessGSCookie());
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfGSCookie - negSpace);
// mov [regFrame + FrameInfo.offsetOfFrameVptr], InlinedCallFrame::GetMethodFrameVPtr()
psl->ThumbEmitMovConstant(regScratch, InlinedCallFrame::GetMethodFrameVPtr());
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfFrameVptr - negSpace);
// ldr regScratch, [regThread + offsetof(Thread, m_pFrame)]
// str regScratch, [regFrame + FrameInfo.offsetOfFrameLink]
psl->ThumbEmitLoadRegIndirect(regScratch, regThread, offsetof(Thread, m_pFrame));
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfFrameLink - negSpace);
// str FP, [regFrame + FrameInfo.offsetOfCalleeSavedFP]
psl->ThumbEmitStoreRegIndirect(thumbRegFp, regFrame, FrameInfo.offsetOfCalleeSavedFP - negSpace);
// str R9, [regFrame + FrameInfo.offsetOfSPAfterProlog]
psl->ThumbEmitStoreRegIndirect(regR9, regFrame, FrameInfo.offsetOfSPAfterProlog - negSpace);
// mov [regFrame + FrameInfo.offsetOfReturnAddress], 0
psl->ThumbEmitMovConstant(regScratch, 0);
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfReturnAddress - negSpace);
#ifdef TARGET_UNIX
DWORD cbSavedRegs = sizeof(ArgumentRegisters) + 2 * 4; // r0-r3, r4, lr
psl->ThumbEmitAdd(regScratch, thumbRegSp, cbSavedRegs);
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace);
#else
// str SP, [regFrame + FrameInfo.offsetOfCallSiteSP]
psl->ThumbEmitStoreRegIndirect(thumbRegSp, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace);
#endif
// mov [regThread + offsetof(Thread, m_pFrame)], regFrame
psl->ThumbEmitStoreRegIndirect(regFrame, regThread, offsetof(Thread, m_pFrame));
    // leave the current Thread in regThread (r5); r4 still holds the frame address
#ifdef TARGET_UNIX
psl->ThumbEmitEpilog();
#else
// Return. The return address has been restored into LR at this point.
// bx lr
psl->ThumbEmitJumpRegister(thumbRegLr);
#endif
// A single process-wide stub that will never unload
RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
}
void StubLinkerCPU::ThumbEmitGetThread(ThumbReg dest)
{
#ifdef TARGET_UNIX
ThumbEmitMovConstant(ThumbReg(0), (TADDR)GetThreadHelper);
ThumbEmitCallRegister(ThumbReg(0));
if (dest != ThumbReg(0))
{
ThumbEmitMovRegReg(dest, ThumbReg(0));
}
#else // TARGET_UNIX
// mrc p15, 0, dest, c13, c0, 2
Emit16(0xee1d);
Emit16((WORD)(0x0f50 | (dest << 12)));
ThumbEmitLoadRegIndirect(dest, dest, offsetof(TEB, ThreadLocalStoragePointer));
ThumbEmitLoadRegIndirect(dest, dest, sizeof(void *) * _tls_index);
ThumbEmitLoadRegIndirect(dest, dest, (int)Thread::GetOffsetOfThreadStatic(&gCurrentThreadInfo));
#endif // TARGET_UNIX
}
// Emits code to adjust for a static delegate target.
VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
{
    // Scan the shuffle entries to see if there are any stack-to-stack operations. If there aren't we can emit a
// much simpler thunk (simply because we generate code that doesn't require more than one scratch
// register).
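    // (Illustration, not derived from this file: for a typical static delegate whose target takes a few
    // word-sized arguments, the shuffle array encodes register-to-register moves such as r1 -> r0,
    // r2 -> r1, r3 -> r2, possibly followed by stack -> r3 and stack -> stack entries, terminated by a
    // ShuffleEntry::SENTINEL entry.)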
bool fSimpleCase = true;
ShuffleEntry *pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
// It's enough to check whether we have a destination stack location (there are no register to stack
// scenarios).
if (!(pEntry->dstofs & ShuffleEntry::REGMASK))
{
fSimpleCase = false;
break;
}
pEntry++;
}
if (fSimpleCase)
{
        // No real prolog for the simple case: we're a tail call, so we shouldn't be on the stack for any walk
// or unwind.
// On entry r0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
// field and stash it in r12.
// ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
// Emit the instructions to rewrite the argument registers. Most will be register-to-register (e.g.
// move r1 to r0) but one or two of them might move values from the top of the incoming stack
// arguments into registers r2 and r3. Note that the entries are ordered so that we don't need to
// worry about a move overwriting a register we'll need to use as input for the next move (i.e. we get
// move r1 to r0, move r2 to r1 etc.).
pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
_ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
if (pEntry->srcofs & ShuffleEntry::REGMASK)
{
// Move from register case.
ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
}
else
{
// Move from the stack case.
// ldr <dest>, [sp + #source_offset]
ThumbEmitLoadRegIndirect(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
thumbRegSp,
(pEntry->srcofs & ShuffleEntry::OFSMASK) * 4);
}
pEntry++;
}
// Tail call to real target.
// bx r12
ThumbEmitJumpRegister(ThumbReg(12));
return;
}
// In the more complex case we need to re-write at least some of the arguments on the stack as well as
// argument registers. We need some temporary registers to perform stack-to-stack copies and we've
// reserved our one remaining volatile register, r12, to store the eventual target method address. So
// we're going to generate a hybrid-tail call. Using a tail call has the advantage that we don't need to
// erect and link an explicit CLR frame to enable crawling of this thunk. Additionally re-writing the
    // stack can be more performant in some scenarios than copying the stack (in the presence of floating point
    // or arguments requiring 64-bit alignment we might not have to move some or even most of the values).
// The hybrid nature is that we'll erect a standard native frame (with a proper prolog and epilog) so we
// can save some non-volatile registers to act as temporaries. Once we've performed the stack re-write
// we'll poke the saved LR value (which will become a PC value on the pop in the epilog) to return to the
// target method instead of us, thus atomically removing our frame from the stack and tail-calling the
// real target.
// Prolog:
ThumbEmitProlog(3, // Save r4-r6,lr (count doesn't include lr)
0, // No additional space in the stack frame required
FALSE); // Don't push argument registers
// On entry r0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
// field and stash it in r12.
// ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
// As we copy slots from lower in the argument stack to higher we need to keep track of source and
// destination pointers into those arguments (if we just use offsets from SP we get into trouble with
// argument frames larger than 4K). We'll use r4 to track the source (original location of an argument
// from the caller's perspective) and r5 to track the destination (new location of the argument from the
// callee's perspective). Both start at the current value of SP plus the offset created by pushing our
// stack frame in the prolog.
// add r4, sp, #cbSavedRegs
// add r5, sp, #cbSavedRegs
DWORD cbSavedRegs = 4 * 4; // r4, r5, r6, lr
ThumbEmitAdd(ThumbReg(4), thumbRegSp, cbSavedRegs);
ThumbEmitAdd(ThumbReg(5), thumbRegSp, cbSavedRegs);
// Follow the shuffle array instructions to re-write some subset of r0-r3 and the stacked arguments to
// remove the unwanted delegate instance in r0. Arguments only ever move from higher registers to lower
// registers or higher stack addresses to lower stack addresses and are ordered from lowest register to
// highest stack address. As a result we can do all updates in order and in place and we'll never
// overwrite a register or stack location needed as a source value in a later iteration.
DWORD dwLastSrcIndex = (DWORD)-1;
DWORD dwLastDstIndex = (DWORD)-1;
pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
// If this is a register-to-register move we can do it in one instruction.
if ((pEntry->srcofs & ShuffleEntry::REGMASK) && (pEntry->dstofs & ShuffleEntry::REGMASK))
{
ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
}
else
{
// There is no case where a source argument register is moved into a destination stack slot.
_ASSERTE((pEntry->srcofs & ShuffleEntry::REGMASK) == 0);
// Source or destination stack offsets might not be contiguous (though they often will be).
// Floating point arguments and 64-bit aligned values can cause discontinuities. While we copy
// values we'll use post increment addressing modes to move both source and destination stack
// pointers forward 4 bytes at a time, the common case. But we'll insert additional add
// instructions for any holes we find (we detect these by remembering the last source and
// destination stack offset we used).
// Add any additional offset to the source pointer (r4) to account for holes in the copy.
DWORD dwSrcIndex = pEntry->srcofs & ShuffleEntry::OFSMASK;
if (dwSrcIndex != (dwLastSrcIndex + 1))
{
// If the gap is at the very beginning, then dwLastSrcIndex is still -1, so we need to allow
// for that. Note that the calculation below handles this properly, due to DWORD wrapping.
_ASSERTE((dwLastSrcIndex == (DWORD)-1) || (dwSrcIndex > dwLastSrcIndex));
// add r4, #gap_size
ThumbEmitIncrement(ThumbReg(4), (dwSrcIndex - dwLastSrcIndex - 1) * 4);
}
dwLastSrcIndex = dwSrcIndex;
// Load the source value from the stack and increment our source pointer (r4) in one instruction.
// If the target is a register we can move the value directly there. Otherwise we move it to the
// r6 temporary register.
if (pEntry->dstofs & ShuffleEntry::REGMASK)
{
// ldr <regnum>, [r4], #4
ThumbEmitLoadIndirectPostIncrement(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK), ThumbReg(4), 4);
}
else
{
// ldr r6, [r4], #4
ThumbEmitLoadIndirectPostIncrement(ThumbReg(6), ThumbReg(4), 4);
// Add any additional offset to the destination pointer (r5) to account for holes in the copy.
DWORD dwDstIndex = pEntry->dstofs & ShuffleEntry::OFSMASK;
if (dwDstIndex != (dwLastDstIndex + 1))
{
// If the gap is at the very beginning, then dwLastDstIndex is still -1, so we need to allow
// for that. Note that the calculation below handles this properly, due to DWORD wrapping.
_ASSERTE((dwLastDstIndex == (DWORD)-1) || (dwDstIndex > dwLastDstIndex));
// add r5, #gap_size
ThumbEmitIncrement(ThumbReg(5), (dwDstIndex - dwLastDstIndex - 1) * 4);
}
dwLastDstIndex = dwDstIndex;
// Write the value in r6 to its final home on the stack and increment our destination pointer
// (r5).
// str r6, [r5], #4
ThumbEmitStoreIndirectPostIncrement(ThumbReg(6), ThumbReg(5), 4);
}
}
pEntry++;
}
// Arguments are copied. Now we modify the saved value of LR we created in our prolog (which will be
// popped back off into PC in our epilog) so that it points to the real target address in r12 rather than
// our return address. We haven't modified LR ourselves, so the net result is that executing our epilog
// will pop our frame and tail call to the real method.
// str r12, [sp + #(cbSavedRegs-4)]
ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, cbSavedRegs - 4);
// Epilog:
ThumbEmitEpilog();
}
void StubLinkerCPU::ThumbEmitTailCallManagedMethod(MethodDesc *pMD)
{
// Use direct call if possible.
if (pMD->HasStableEntryPoint())
{
// mov r12, #entry_point
ThumbEmitMovConstant(ThumbReg(12), (TADDR)pMD->GetStableEntryPoint());
}
else
{
// mov r12, #slotaddress
ThumbEmitMovConstant(ThumbReg(12), (TADDR)pMD->GetAddrOfSlot());
// ldr r12, [r12]
ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(12), 0);
}
// bx r12
ThumbEmitJumpRegister(ThumbReg(12));
}
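// Emits the body of a computed instantiating method stub: perform the register-to-register argument
// shuffle described by pShuffleEntryArray, load the hidden instantiation argument (either the supplied
// extraArg, or, for the unboxing stub case, the MethodTable* read from the 'this' object), then tail
// call the shared method via r12. This is a sketch of the flow implemented below.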
VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg)
{
STANDARD_VM_CONTRACT;
struct ShuffleEntry *pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
_ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
_ASSERTE(pEntry->srcofs & ShuffleEntry::REGMASK);
_ASSERTE(!(pEntry->dstofs & ShuffleEntry::FPREGMASK));
_ASSERTE(!(pEntry->srcofs & ShuffleEntry::FPREGMASK));
_ASSERTE(pEntry->dstofs != ShuffleEntry::HELPERREG);
_ASSERTE(pEntry->srcofs != ShuffleEntry::HELPERREG);
ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
pEntry++;
}
MetaSig msig(pSharedMD);
ArgIterator argit(&msig);
if (argit.HasParamType())
{
// Place instantiation parameter into the correct register.
ArgLocDesc sInstArgLoc;
argit.GetParamTypeLoc(&sInstArgLoc);
int regHidden = sInstArgLoc.m_idxGenReg;
_ASSERTE(regHidden != -1);
if (extraArg == NULL)
{
if (pSharedMD->RequiresInstMethodTableArg())
{
// Unboxing stub case
// Extract MethodTable pointer (the hidden arg) from the object instance.
// ldr regHidden, [r0]
ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
}
}
else
{
// mov regHidden, #pHiddenArg
ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)extraArg);
}
}
if (extraArg == NULL)
{
// Unboxing stub case
// Skip over the MethodTable* to find the address of the unboxed value type.
// add r0, #sizeof(MethodTable*)
ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
}
ThumbEmitTailCallManagedMethod(pSharedMD);
SetTargetMethod(pSharedMD);
}
#endif // !DACCESS_COMPILE
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
{
return EXCEPTION_CONTINUE_SEARCH;
}
void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs)
{
LIMITED_METHOD_CONTRACT;
T_CONTEXT * pContext = pRD->pCurrentContext;
pContext->R4 = pRegs->r4;
pContext->R5 = pRegs->r5;
pContext->R6 = pRegs->r6;
pContext->R7 = pRegs->r7;
pContext->R8 = pRegs->r8;
pContext->R9 = pRegs->r9;
pContext->R10 = pRegs->r10;
pContext->R11 = pRegs->r11;
pContext->Lr = pRegs->r14;
T_KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
pRD->pCurrentContextPointers->R4 = (PDWORD)&pRegs->r4;
pRD->pCurrentContextPointers->R5 = (PDWORD)&pRegs->r5;
pRD->pCurrentContextPointers->R6 = (PDWORD)&pRegs->r6;
pRD->pCurrentContextPointers->R7 = (PDWORD)&pRegs->r7;
pRD->pCurrentContextPointers->R8 = (PDWORD)&pRegs->r8;
pRD->pCurrentContextPointers->R9 = (PDWORD)&pRegs->r9;
pRD->pCurrentContextPointers->R10 = (PDWORD)&pRegs->r10;
pRD->pCurrentContextPointers->R11 = (PDWORD)&pRegs->r11;
pRD->pCurrentContextPointers->Lr = NULL;
}
void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
// Copy the saved argument registers into the current context
ArgumentRegisters * pArgRegs = GetArgumentRegisters();
pRD->pCurrentContext->R0 = pArgRegs->r[0];
pRD->pCurrentContext->R1 = pArgRegs->r[1];
pRD->pCurrentContext->R2 = pArgRegs->r[2];
pRD->pCurrentContext->R3 = pArgRegs->r[3];
// Next, copy all the callee saved registers
UpdateRegDisplayFromCalleeSavedRegisters(pRD, GetCalleeSavedRegisters());
// Set ControlPC to be the same as the saved "return address"
// value, which is actually a ControlPC in the frameless method (e.g.
// faulting address in case of AV or TAE).
pRD->pCurrentContext->Pc = GetReturnAddress();
// Set the caller SP
pRD->pCurrentContext->Sp = this->GetSP();
// Finally, sync up the regdisplay with the context
SyncRegDisplayToCurrentContext(pRD);
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP));
}
void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
LIMITED_METHOD_DAC_CONTRACT;
// Copy the context to regdisplay
memcpy(pRD->pCurrentContext, &m_ctx, sizeof(T_CONTEXT));
pRD->ControlPC = ::GetIP(&m_ctx);
pRD->SP = ::GetSP(&m_ctx);
// Update the integer registers in KNONVOLATILE_CONTEXT_POINTERS from
// the exception context we have.
pRD->pCurrentContextPointers->R4 = (PDWORD)&m_ctx.R4;
pRD->pCurrentContextPointers->R5 = (PDWORD)&m_ctx.R5;
pRD->pCurrentContextPointers->R6 = (PDWORD)&m_ctx.R6;
pRD->pCurrentContextPointers->R7 = (PDWORD)&m_ctx.R7;
pRD->pCurrentContextPointers->R8 = (PDWORD)&m_ctx.R8;
pRD->pCurrentContextPointers->R9 = (PDWORD)&m_ctx.R9;
pRD->pCurrentContextPointers->R10 = (PDWORD)&m_ctx.R10;
pRD->pCurrentContextPointers->R11 = (PDWORD)&m_ctx.R11;
pRD->pCurrentContextPointers->Lr = NULL;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
}
void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACT_VOID
{
NOTHROW;
GC_NOTRIGGER;
// We should skip over InlinedCallFrame if it is not active.
// It will be part of a JITed method's frame, and the stack-walker
// can handle such a case.
#ifdef PROFILING_SUPPORTED
PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
#endif
HOST_NOCALLS;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACT_END;
// @TODO: Remove this after the debugger is fixed to avoid stack-walks from bad places
// @TODO: This may be still needed for sampling profilers
if (!InlinedCallFrame::FrameHasActiveCall(this))
{
LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
return;
}
// reset pContext; it's only valid for active (top-most) frame
pRD->pContext = NULL;
*(pRD->pPC) = m_pCallerReturnAddress;
pRD->SP = (DWORD) dac_cast<TADDR>(m_pCallSiteSP);
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
pRD->pCurrentContext->Pc = *(pRD->pPC);
pRD->pCurrentContext->Sp = pRD->SP;
// Update the frame pointer in the current context.
pRD->pCurrentContext->R11 = m_pCalleeSavedFP;
pRD->pCurrentContextPointers->R11 = &m_pCalleeSavedFP;
// This is necessary to unwind methods with alloca. This needs to stay
// in sync with definition of REG_SAVED_LOCALLOC_SP in the JIT.
pRD->pCurrentContext->R9 = (DWORD) dac_cast<TADDR>(m_pSPAfterProlog);
pRD->pCurrentContextPointers->R9 = (DWORD *)&m_pSPAfterProlog;
RETURN;
}
#ifdef FEATURE_HIJACK
TADDR ResumableFrame::GetReturnAddressPtr(void)
{
LIMITED_METHOD_DAC_CONTRACT;
return dac_cast<TADDR>(m_Regs) + offsetof(T_CONTEXT, Pc);
}
void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACT_VOID
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACT_END;
CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT));
pRD->ControlPC = m_Regs->Pc;
pRD->SP = m_Regs->Sp;
pRD->pCurrentContextPointers->R4 = &m_Regs->R4;
pRD->pCurrentContextPointers->R5 = &m_Regs->R5;
pRD->pCurrentContextPointers->R6 = &m_Regs->R6;
pRD->pCurrentContextPointers->R7 = &m_Regs->R7;
pRD->pCurrentContextPointers->R8 = &m_Regs->R8;
pRD->pCurrentContextPointers->R9 = &m_Regs->R9;
pRD->pCurrentContextPointers->R10 = &m_Regs->R10;
pRD->pCurrentContextPointers->R11 = &m_Regs->R11;
pRD->pCurrentContextPointers->Lr = &m_Regs->Lr;
pRD->volatileCurrContextPointers.R0 = &m_Regs->R0;
pRD->volatileCurrContextPointers.R1 = &m_Regs->R1;
pRD->volatileCurrContextPointers.R2 = &m_Regs->R2;
pRD->volatileCurrContextPointers.R3 = &m_Regs->R3;
pRD->volatileCurrContextPointers.R12 = &m_Regs->R12;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
}
void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
CONTRACTL_END;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE;
pRD->pCurrentContext->Pc = m_ReturnAddress;
pRD->pCurrentContext->Sp = PTR_TO_TADDR(m_Args) + sizeof(struct HijackArgs);
pRD->pCurrentContext->R0 = m_Args->R0;
pRD->pCurrentContext->R4 = m_Args->R4;
pRD->pCurrentContext->R5 = m_Args->R5;
pRD->pCurrentContext->R6 = m_Args->R6;
pRD->pCurrentContext->R7 = m_Args->R7;
pRD->pCurrentContext->R8 = m_Args->R8;
pRD->pCurrentContext->R9 = m_Args->R9;
pRD->pCurrentContext->R10 = m_Args->R10;
pRD->pCurrentContext->R11 = m_Args->R11;
pRD->pCurrentContextPointers->R4 = &m_Args->R4;
pRD->pCurrentContextPointers->R5 = &m_Args->R5;
pRD->pCurrentContextPointers->R6 = &m_Args->R6;
pRD->pCurrentContextPointers->R7 = &m_Args->R7;
pRD->pCurrentContextPointers->R8 = &m_Args->R8;
pRD->pCurrentContextPointers->R9 = &m_Args->R9;
pRD->pCurrentContextPointers->R10 = &m_Args->R10;
pRD->pCurrentContextPointers->R11 = &m_Args->R11;
pRD->pCurrentContextPointers->Lr = NULL;
SyncRegDisplayToCurrentContext(pRD);
}
#endif // FEATURE_HIJACK
class UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
{
_ASSERTE(offsetof(UMEntryThunkCode, m_code) == 0);
UMEntryThunkCode * pCode = (UMEntryThunkCode*)((ULONG_PTR)pCallback & ~THUMB_CODE);
// We may be called with an unmanaged external code pointer instead. So if it doesn't look like one of our
// stubs (see UMEntryThunkCode::Encode below) then we'll return NULL. Luckily in these scenarios our
// caller will perform a hash lookup on successful return to verify our result in case random unmanaged
// code happens to look like ours.
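// The thunk laid down by UMEntryThunkCode::Encode (below) looks like this:
// ldr r12, [pc + 8] ; 0xf8df 0xc008 : loads m_pvSecretParam (the UMEntryThunk*) into r12
// ldr pc, [pc] ; 0xf8df 0xf000 : jumps to m_pTargetCode
// dcd m_pTargetCode
// dcd m_pvSecretParam
// so matching those four halfwords lets us recover the UMEntryThunk* from m_pvSecretParam.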
if ((pCode->m_code[0] == 0xf8df) &&
(pCode->m_code[1] == 0xc008) &&
(pCode->m_code[2] == 0xf8df) &&
(pCode->m_code[3] == 0xf000))
{
return (UMEntryThunk*)pCode->m_pvSecretParam;
}
return NULL;
}
void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
// ldr r12, [pc + 8]
m_code[0] = 0xf8df;
m_code[1] = 0xc008;
// ldr pc, [pc]
m_code[2] = 0xf8df;
m_code[3] = 0xf000;
m_pTargetCode = (TADDR)pTargetCode;
m_pvSecretParam = (TADDR)pvSecretParam;
FlushInstructionCache(GetCurrentProcess(),&pEntryThunkCodeRX->m_code,sizeof(m_code));
}
#ifndef DACCESS_COMPILE
void UMEntryThunkCode::Poison()
{
ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
pThisRW->m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
// ldr r0, [pc + 8]
pThisRW->m_code[0] = 0x4802;
// nop
pThisRW->m_code[1] = 0xbf00;
ClrFlushInstructionCache(&m_code,sizeof(m_code));
}
#endif // DACCESS_COMPILE
///////////////////////////// UNIMPLEMENTED //////////////////////////////////
#ifndef DACCESS_COMPILE
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
void InitJITHelpers1()
{
STANDARD_VM_CONTRACT;
// Allocation helpers, faster but non-logging.
if (!(TrackAllocationsEnabled()
|| LoggingOn(LF_GCALLOC, LL_INFO10)
#ifdef _DEBUG
|| (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
#endif // _DEBUG
))
{
_ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);
SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_FastPortable);
SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP_FastPortable);
ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateString_MP_FastPortable), ECall::FastAllocateString);
}
}
VOID ResetCurrentContext()
{
LIMITED_METHOD_CONTRACT;
}
#endif // !DACCESS_COMPILE
#ifdef FEATURE_COMINTEROP
void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
// mov r12, pc
// ldr pc, [pc, #0]
// dcd 0
// dcd target
WORD rgCode[] = {
0x46fc,
0xf8df, 0xf004
};
BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
memcpy(pBufferRW, rgCode, sizeof(rgCode));
*((PCODE*)(pBufferRW + sizeof(rgCode) + 2)) = target;
// Ensure that the updated instructions get actually written
ClrFlushInstructionCache(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE);
_ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
*((PCODE*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
}
#endif // FEATURE_COMINTEROP
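// Emit a movw/movt pair that materializes the 32-bit immediate 'imm' into register 'reg'. The two
// halfword templates written below (0xF240 = movw, 0xF2C0 = movt) are then patched with the immediate
// bits by PutThumb2Mov32.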
void MovRegImm(BYTE* p, int reg, TADDR imm)
{
LIMITED_METHOD_CONTRACT;
*(WORD *)(p + 0) = 0xF240;
*(WORD *)(p + 2) = (UINT16)(reg << 8);
*(WORD *)(p + 4) = 0xF2C0;
*(WORD *)(p + 6) = (UINT16)(reg << 8);
PutThumb2Mov32((UINT16 *)p, imm);
}
#ifndef DACCESS_COMPILE
#ifdef FEATURE_READYTORUN
//
// Allocation of dynamic helpers
//
#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \
BYTE * pStart = startWriterHolder.GetRW(); \
size_t rxOffset = pStartRX - pStart; \
BYTE * p = pStart;
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) { *(WORD *)p = 0xdefe; p += 2; } \
ClrFlushInstructionCache(pStartRX, cbAligned); \
return (PCODE)((TADDR)pStartRX | THUMB_CODE)
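// Usage pattern for these macros (illustrative): each helper below brackets its emission with them.
// BEGIN_DYNAMIC_HELPER_EMIT(18); // reserve 18 bytes of executable memory, 'p' is the RW alias
// ... emit instructions, advancing p ...
// END_DYNAMIC_HELPER_EMIT(); // pad the tail with 0xdefe (permanently undefined opcode),
// // flush the I-cache and return the Thumb-tagged entry point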
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
STANDARD_VM_CONTRACT;
BEGIN_DYNAMIC_HELPER_EMIT(18);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
// mov r1, arg
MovRegImm(p, 1, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
}
PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(26);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// mov r1, arg2
MovRegImm(p, 1, arg2);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(20);
// mov r1, r0
*(WORD *)p = 0x4601;
p += 2;
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator)
{
BEGIN_DYNAMIC_HELPER_EMIT(2);
*(WORD *)p = 0x4770; // bx lr
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg)
{
BEGIN_DYNAMIC_HELPER_EMIT(10);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// bx lr
*(WORD *)p = 0x4770;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset)
{
BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 16 : 12);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// ldr r0, [r0]
*(WORD *)p = 0x6800;
p += 2;
if (offset != 0)
{
// add r0, r0, <offset>
*(WORD *)(p + 0) = 0xF100;
*(WORD *)(p + 2) = offset;
p += 4;
}
// bx lr
*(WORD *)p = 0x4770;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
// mov r2, arg
MovRegImm(p, 2, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(26);
// mov r2, arg
MovRegImm(p, 2, arg);
p += 8;
// mov r3, arg
MovRegImm(p, 3, arg2);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule)
{
STANDARD_VM_CONTRACT;
PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ?
GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) :
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
argsWriterHolder.GetRW()->signature = pLookup->signature;
argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
// The lookup result is available only via the run-time helper function.
if (pLookup->indirections == CORINFO_USEHELPER)
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
else
{
int indirectionsSize = 0;
if (pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
{
indirectionsSize += (pLookup->sizeOffset >= 0xFFF ? 10 : 4);
indirectionsSize += 12;
}
for (WORD i = 0; i < pLookup->indirections; i++)
{
indirectionsSize += (pLookup->offsets[i] >= 0xFFF ? 10 : 4);
}
int codeSize = indirectionsSize + (pLookup->testForNull ? 26 : 2);
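// Size accounting (illustrative): the fixed portion is 26 bytes when a null test is emitted, i.e.
// 'mov r3,r0' (2) + 'cbz' (2) + 'mov pc,lr' (2) + 'mov r0,r3' (2) + helper tail call (18); without a
// null test only the trailing 'mov pc,lr' (2) is needed beyond the indirection loads.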
BEGIN_DYNAMIC_HELPER_EMIT(codeSize);
if (pLookup->testForNull)
{
// mov r3, r0
*(WORD *)p = 0x4603;
p += 2;
}
BYTE* pBLECall = NULL;
for (WORD i = 0; i < pLookup->indirections; i++)
{
if (i == pLookup->indirections - 1 && pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
{
_ASSERTE(pLookup->testForNull && i > 0);
if (pLookup->sizeOffset >= 0xFFF)
{
// mov r2, offset
MovRegImm(p, 2, pLookup->sizeOffset); p += 8;
// ldr r1, [r0, r2]
*(WORD*)p = 0x5881; p += 2;
}
else
{
// ldr r1, [r0 + offset]
*(WORD*)p = 0xF8D0; p += 2;
*(WORD*)p = (WORD)(0xFFF & pLookup->sizeOffset) | 0x1000; p += 2;
}
// mov r2, slotOffset
MovRegImm(p, 2, slotOffset); p += 8;
// cmp r1,r2
*(WORD*)p = 0x4291; p += 2;
// ble 'CALL HELPER'
pBLECall = p; // Offset filled later
*(WORD*)p = 0xdd00; p += 2;
}
if (pLookup->offsets[i] >= 0xFFF)
{
// mov r2, offset
MovRegImm(p, 2, pLookup->offsets[i]);
p += 8;
// ldr r0, [r0, r2]
*(WORD *)p = 0x5880;
p += 2;
}
else
{
// ldr r0, [r0 + offset]
*(WORD *)p = 0xF8D0;
p += 2;
*(WORD *)p = (WORD)(0xFFF & pLookup->offsets[i]);
p += 2;
}
}
// No null test required
if (!pLookup->testForNull)
{
_ASSERTE(pLookup->sizeOffset == CORINFO_NO_SIZE_CHECK);
// mov pc, lr
*(WORD *)p = 0x46F7;
p += 2;
}
else
{
// cbz r0, 'CALL HELPER'
*(WORD *)p = 0xB100;
p += 2;
// mov pc, lr
*(WORD *)p = 0x46F7;
p += 2;
// CALL HELPER:
if (pBLECall != NULL)
*(WORD*)pBLECall |= (((BYTE)(p - pBLECall) - 4) >> 1);
// mov r0, r3
*(WORD *)p = 0x4618;
p += 2;
EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
END_DYNAMIC_HELPER_EMIT();
}
}
#endif // FEATURE_READYTORUN
#endif // !DACCESS_COMPILE
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: stubs.cpp
//
// This file contains stub functions for unimplemented features needed to
// run on the ARM platform.
#include "common.h"
#include "jitinterface.h"
#include "comdelegate.h"
#include "invokeutil.h"
#include "excep.h"
#include "class.h"
#include "field.h"
#include "dllimportcallback.h"
#include "dllimport.h"
#include "eeconfig.h"
#include "cgensys.h"
#include "asmconstants.h"
#include "virtualcallstub.h"
#include "gcdump.h"
#include "rtlfunctions.h"
#include "codeman.h"
#include "ecall.h"
#include "threadsuspend.h"
// target write barriers
EXTERN_C void JIT_WriteBarrier(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_End();
EXTERN_C void JIT_CheckedWriteBarrier(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_End();
EXTERN_C void JIT_ByRefWriteBarrier_End();
EXTERN_C void JIT_ByRefWriteBarrier_SP(Object **dst, Object *ref);
// source write barriers
EXTERN_C void JIT_WriteBarrier_SP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_SP_Pre_End();
EXTERN_C void JIT_WriteBarrier_SP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_SP_Post_End();
EXTERN_C void JIT_WriteBarrier_MP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_MP_Pre_End();
EXTERN_C void JIT_WriteBarrier_MP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_MP_Post_End();
EXTERN_C void JIT_CheckedWriteBarrier_SP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_SP_Pre_End();
EXTERN_C void JIT_CheckedWriteBarrier_SP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_SP_Post_End();
EXTERN_C void JIT_CheckedWriteBarrier_MP_Pre(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_MP_Pre_End();
EXTERN_C void JIT_CheckedWriteBarrier_MP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_CheckedWriteBarrier_MP_Post_End();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Pre();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Pre_End();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Post();
EXTERN_C void JIT_ByRefWriteBarrier_SP_Post_End();
EXTERN_C void JIT_ByRefWriteBarrier_MP_Pre();
EXTERN_C void JIT_ByRefWriteBarrier_MP_Pre_End();
EXTERN_C void JIT_ByRefWriteBarrier_MP_Post(Object **dst, Object *ref);
EXTERN_C void JIT_ByRefWriteBarrier_MP_Post_End();
EXTERN_C void JIT_PatchedWriteBarrierStart();
EXTERN_C void JIT_PatchedWriteBarrierLast();
#ifndef DACCESS_COMPILE
//-----------------------------------------------------------------------
// InstructionFormat for conditional jump.
//-----------------------------------------------------------------------
class ThumbCondJump : public InstructionFormat
{
public:
ThumbCondJump() : InstructionFormat(InstructionFormat::k16)
{
LIMITED_METHOD_CONTRACT;
}
virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16);
return 2;
}
virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16);
return 4;
}
//CB{N}Z Rn, <Label>
//Encoding 1|0|1|1|op|0|i|1|imm5|Rn
//op = Bit3(variation)
//Rn = Bits2-0(variation)
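//e.g. variationCode = 0x0 emits CBZ r0, <Label>; variationCode = (0x8 | 0x4) emits CBNZ r4, <Label>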
virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT variationCode, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16);
if (fixedUpReference < 0 || fixedUpReference > 126)
COMPlusThrow(kNotSupportedException);
_ASSERTE((fixedUpReference & 0x1) == 0);
pOutBufferRW[0] = static_cast<BYTE>(((0x3e & fixedUpReference) << 2) | (0x7 & variationCode));
pOutBufferRW[1] = static_cast<BYTE>(0xb1 | (0x8 & variationCode)| ((0x40 & fixedUpReference)>>5));
}
};
//-----------------------------------------------------------------------
// InstructionFormat for near Jump and short Jump
//-----------------------------------------------------------------------
class ThumbNearJump : public InstructionFormat
{
public:
ThumbNearJump() : InstructionFormat(InstructionFormat::k16|InstructionFormat::k32)
{
LIMITED_METHOD_CONTRACT;
}
virtual UINT GetSizeOfInstruction(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
if(refsize == InstructionFormat::k16)
return 2;
else if(refsize == InstructionFormat::k32)
return 4;
else
_ASSERTE(!"Unknown refsize");
return 0;
}
virtual VOID EmitInstruction(UINT refsize, __int64 fixedUpReference, BYTE *pOutBufferRX, BYTE *pOutBufferRW, UINT cond, BYTE *pDataBuffer)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(cond < 15);
//offsets must be in multiples of 2
_ASSERTE((fixedUpReference & 0x1) == 0);
if(cond == 0xe) //Always execute
{
if(fixedUpReference >= -2048 && fixedUpReference <= 2046)
{
if(refsize != InstructionFormat::k16)
_ASSERTE(!"Expected refSize to be 2");
//Emit T2 encoding of B<c> <label> instruction
pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
pOutBufferRW[1] = static_cast<BYTE>(0xe0 | ((fixedUpReference & 0xe00)>>9));
}
else if(fixedUpReference >= -16777216 && fixedUpReference <= 16777214)
{
if(refsize != InstructionFormat::k32)
_ASSERTE(!"Expected refSize to be 4");
//Emit T4 encoding of B<c> <label> instruction
int s = (fixedUpReference & 0x1000000) >> 24;
int i1 = (fixedUpReference & 0x800000) >> 23;
int i2 = (fixedUpReference & 0x400000) >> 22;
pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0xff000) >> 12);
pOutBufferRW[1] = static_cast<BYTE>(0xf0 | (s << 2) |( (fixedUpReference & 0x300000) >>20));
pOutBufferRW[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
pOutBufferRW[3] = static_cast<BYTE>(0x90 | (~(i1^s)) << 5 | (~(i2^s)) << 3 | (fixedUpReference & 0xe00) >> 9);
}
else
{
COMPlusThrow(kNotSupportedException);
}
}
else // conditional branch based on flags
{
if(fixedUpReference >= -256 && fixedUpReference <= 254)
{
if(refsize != InstructionFormat::k16)
_ASSERTE(!"Expected refSize to be 2");
//Emit T1 encoding of B<c> <label> instruction
pOutBufferRW[0] = static_cast<BYTE>((fixedUpReference & 0x1fe)>>1);
pOutBufferRW[1] = static_cast<BYTE>(0xd0 | (cond & 0xf));
}
else if(fixedUpReference >= -1048576 && fixedUpReference <= 1048574)
{
if(refsize != InstructionFormat::k32)
_ASSERTE(!"Expected refSize to be 4");
//Emit T3 encoding of B<c> <label> instruction
pOutBufferRW[0] = static_cast<BYTE>(((cond & 0x3) << 6) | ((fixedUpReference & 0x3f000) >>12));
pOutBufferRW[1] = static_cast<BYTE>(0xf0 | ((fixedUpReference & 0x100000) >>18) | ((cond & 0xc) >> 2));
pOutBufferRW[2] = static_cast<BYTE>((fixedUpReference & 0x1fe) >> 1);
pOutBufferRW[3] = static_cast<BYTE>(0x80 | ((fixedUpReference & 0x40000) >> 13) | ((fixedUpReference & 0x80000) >> 16) | ((fixedUpReference & 0xe00) >> 9));
}
else
{
COMPlusThrow(kNotSupportedException);
}
}
}
virtual BOOL CanReach(UINT refsize, UINT variationCode, BOOL fExternal, INT_PTR offset)
{
LIMITED_METHOD_CONTRACT
if (fExternal)
{
_ASSERTE(0);
return FALSE;
}
else
{
switch (refsize)
{
case InstructionFormat::k16:
if(variationCode == 0xe)
return (offset >= -2048 && offset <= 2046 && (offset & 0x1) == 0);
else
return (offset >= -256 && offset <= 254 && (offset & 0x1) == 0);
case InstructionFormat::k32:
if(variationCode == 0xe)
return ((offset >= -16777216) && (offset <= 16777214) && ((offset & 0x1) == 0));
else
return ((offset >= -1048576) && (offset <= 1048574) && ((offset & 0x1) == 0));
default:
_ASSERTE(!"Unknown refsize");
return FALSE;
}
}
}
virtual UINT GetHotSpotOffset(UINT refsize, UINT variationCode)
{
LIMITED_METHOD_CONTRACT
_ASSERTE(refsize == InstructionFormat::k16 || refsize == InstructionFormat::k32);
return 4;
}
};
//static conditional jump instruction format object
static BYTE gThumbCondJump[sizeof(ThumbCondJump)];
//static near jump instruction format object
static BYTE gThumbNearJump[sizeof(ThumbNearJump)];
void StubLinkerCPU::Init(void)
{
//Initialize the object
new (gThumbCondJump) ThumbCondJump();
new (gThumbNearJump) ThumbNearJump();
}
// GC write barrier support.
//
// To optimize our write barriers we code the values of several GC globals (e.g. g_lowest_address) directly
// into the barrier function itself, thus avoiding a double memory indirection. Every time the GC modifies one
// of these globals we need to update all of the write barriers accordingly.
//
// In order to keep this process non-brittle we don't hard code the offsets of the instructions that need to
// be changed. Instead the code used to create these barriers is implemented using special macros that record
// the necessary offsets in a descriptor table. Search for "GC write barrier support" in vm\arm\asmhelpers.asm
// for more details.
// Structure describing the layout of a single write barrier descriptor. This must be kept in sync with the
// code in vm\arm\asmhelpers.asm in the WRITE_BARRIER_END macro. Each offset recorded is for one of the
// supported GC globals (an offset of 0xffff is encoded if that global is not used by the particular barrier
// function). We currently only support one usage of each global by any single barrier function. The offset is
// the byte offset from the start of the function at which a movw,movt instruction pair is used to load the
// value of the global into a register.
struct WriteBarrierDescriptor
{
#ifdef TARGET_UNIX
DWORD m_funcStartOffset; // Offset to the start of the barrier function relative to this struct address
DWORD m_funcEndOffset; // Offset to the end of the barrier function relative to this struct address
#else // TARGET_UNIX
BYTE * m_pFuncStart; // Pointer to the start of the barrier function
BYTE * m_pFuncEnd; // Pointer to the end of the barrier function
#endif // TARGET_UNIX
DWORD m_dw_g_lowest_address_offset; // Offset of the instruction reading g_lowest_address
DWORD m_dw_g_highest_address_offset; // Offset of the instruction reading g_highest_address
DWORD m_dw_g_ephemeral_low_offset; // Offset of the instruction reading g_ephemeral_low
DWORD m_dw_g_ephemeral_high_offset; // Offset of the instruction reading g_ephemeral_high
DWORD m_dw_g_card_table_offset; // Offset of the instruction reading g_card_table
};
// Infrastructure used for mapping of the source and destination of current WB patching
struct WriteBarrierMapping
{
PBYTE to; // Pointer to the write-barrier where it was copied over
PBYTE from; // Pointer to write-barrier from which it was copied
};
const int WriteBarrierIndex = 0;
const int CheckedWriteBarrierIndex = 1;
const int ByRefWriteBarrierIndex = 2;
const int MaxWriteBarrierIndex = 3;
WriteBarrierMapping wbMapping[MaxWriteBarrierIndex] =
{
{(PBYTE)JIT_WriteBarrier, NULL},
{(PBYTE)JIT_CheckedWriteBarrier, NULL},
{(PBYTE)JIT_ByRefWriteBarrier, NULL}
};
PBYTE FindWBMapping(PBYTE from)
{
for(int i = 0; i < MaxWriteBarrierIndex; ++i)
{
if(wbMapping[i].from == from)
return wbMapping[i].to;
}
return NULL;
}
// Pointer to the start of the descriptor table. The end of the table is marked by a sentinel entry
// (m_pFuncStart is NULL).
EXTERN_C WriteBarrierDescriptor g_rgWriteBarrierDescriptors;
// Determine the range of memory containing all the write barrier implementations (these are clustered
// together and should fit in a page or maybe two).
void ComputeWriteBarrierRange(BYTE ** ppbStart, DWORD * pcbLength)
{
DWORD size = (PBYTE)JIT_PatchedWriteBarrierLast - (PBYTE)JIT_PatchedWriteBarrierStart;
*ppbStart = (PBYTE)JIT_PatchedWriteBarrierStart;
if (IsWriteBarrierCopyEnabled())
{
*ppbStart = GetWriteBarrierCodeLocation(*ppbStart);
}
*pcbLength = size;
}
void CopyWriteBarrier(PCODE dstCode, PCODE srcCode, PCODE endCode)
{
TADDR dst = (TADDR)PCODEToPINSTR((PCODE)GetWriteBarrierCodeLocation((void*)dstCode));
TADDR src = PCODEToPINSTR(srcCode);
TADDR end = PCODEToPINSTR(endCode);
size_t size = (PBYTE)end - (PBYTE)src;
ExecutableWriterHolderNoLog<void> writeBarrierWriterHolder;
if (IsWriteBarrierCopyEnabled())
{
writeBarrierWriterHolder.AssignExecutableWriterHolder((void*)dst, size);
dst = (TADDR)writeBarrierWriterHolder.GetRW();
}
memcpy((PVOID)dst, (PVOID)src, size);
}
#if _DEBUG
void ValidateWriteBarriers()
{
// Post-grow write barriers are bigger than pre-grow ones, so validate that the target write barrier has space to accommodate them
_ASSERTE( ((PBYTE)JIT_WriteBarrier_End - (PBYTE)JIT_WriteBarrier) >= ((PBYTE)JIT_WriteBarrier_MP_Post_End - (PBYTE)JIT_WriteBarrier_MP_Post));
_ASSERTE( ((PBYTE)JIT_WriteBarrier_End - (PBYTE)JIT_WriteBarrier) >= ((PBYTE)JIT_WriteBarrier_SP_Post_End - (PBYTE)JIT_WriteBarrier_SP_Post));
_ASSERTE( ((PBYTE)JIT_CheckedWriteBarrier_End - (PBYTE)JIT_CheckedWriteBarrier) >= ((PBYTE)JIT_CheckedWriteBarrier_MP_Post_End - (PBYTE)JIT_CheckedWriteBarrier_MP_Post));
_ASSERTE( ((PBYTE)JIT_CheckedWriteBarrier_End - (PBYTE)JIT_CheckedWriteBarrier) >= ((PBYTE)JIT_CheckedWriteBarrier_SP_Post_End - (PBYTE)JIT_CheckedWriteBarrier_SP_Post));
_ASSERTE( ((PBYTE)JIT_ByRefWriteBarrier_End - (PBYTE)JIT_ByRefWriteBarrier) >= ((PBYTE)JIT_ByRefWriteBarrier_MP_Post_End - (PBYTE)JIT_ByRefWriteBarrier_MP_Post));
_ASSERTE( ((PBYTE)JIT_ByRefWriteBarrier_End - (PBYTE)JIT_ByRefWriteBarrier) >= ((PBYTE)JIT_ByRefWriteBarrier_SP_Post_End - (PBYTE)JIT_ByRefWriteBarrier_SP_Post));
}
#endif // _DEBUG
#define UPDATE_WB(_proc,_grow) \
CopyWriteBarrier((PCODE)JIT_WriteBarrier, (PCODE)JIT_WriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_WriteBarrier_ ## _proc ## _ ## _grow ## _End); \
wbMapping[WriteBarrierIndex].from = (PBYTE)JIT_WriteBarrier_ ## _proc ## _ ## _grow ; \
\
CopyWriteBarrier((PCODE)JIT_CheckedWriteBarrier, (PCODE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow ## _End); \
wbMapping[CheckedWriteBarrierIndex].from = (PBYTE)JIT_CheckedWriteBarrier_ ## _proc ## _ ## _grow ; \
\
CopyWriteBarrier((PCODE)JIT_ByRefWriteBarrier, (PCODE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow , (PCODE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow ## _End); \
wbMapping[ByRefWriteBarrierIndex].from = (PBYTE)JIT_ByRefWriteBarrier_ ## _proc ## _ ## _grow ; \
// Update the instructions in our various write barrier implementations that refer directly to the values
// of GC globals such as g_lowest_address and g_card_table. We don't particularly care which values have
// changed on each of these callbacks; it's pretty cheap to refresh them all.
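// The flow below is: (1) if a (re)copy is needed (first call, or the first post-grow request), copy the
// appropriate precompiled SP/MP, pre/post-grow barrier bodies over the patchable JIT_* entry points via
// UPDATE_WB; (2) walk the descriptor table and re-patch every movw/movt pair that loads a GC global.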
void UpdateGCWriteBarriers(bool postGrow = false)
{
// Define a helper macro that abstracts the minutia of patching the instructions to access the value of a
// particular GC global.
#if _DEBUG
ValidateWriteBarriers();
#endif // _DEBUG
static bool wbCopyRequired = true; // We begin with a wb copy
static bool wbIsPostGrow = false; // We begin with pre-Grow write barrier
if(postGrow && !wbIsPostGrow)
{
wbIsPostGrow = true;
wbCopyRequired = true;
}
if(wbCopyRequired)
{
BOOL mp = g_SystemInfo.dwNumberOfProcessors > 1;
if(mp)
{
if(wbIsPostGrow)
{
UPDATE_WB(MP,Post);
}
else
{
UPDATE_WB(MP,Pre);
}
}
else
{
if(wbIsPostGrow)
{
UPDATE_WB(SP,Post);
}
else
{
UPDATE_WB(SP,Pre);
}
}
wbCopyRequired = false;
}
#define GWB_PATCH_OFFSET(_global) \
if (pDesc->m_dw_##_global##_offset != 0xffff) \
PutThumb2Mov32((UINT16*)(to + pDesc->m_dw_##_global##_offset), (UINT32)(dac_cast<TADDR>(_global)));
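// For example, GWB_PATCH_OFFSET(g_card_table) rewrites the movw/movt pair at
// to + pDesc->m_dw_g_card_table_offset with the current value of g_card_table, and is a no-op when the
// recorded offset is the 0xffff sentinel (i.e. that barrier doesn't reference the global).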
// Iterate through the write barrier patch table created in the .clrwb section
// (see write barrier asm code)
WriteBarrierDescriptor * pDesc = &g_rgWriteBarrierDescriptors;
#ifdef TARGET_UNIX
while (pDesc->m_funcStartOffset)
#else // TARGET_UNIX
while (pDesc->m_pFuncStart)
#endif // TARGET_UNIX
{
// If the write barrier is being currently used (as in copied over to the patchable site)
// then read the patch location from the table and use the offset to patch the target asm code
#ifdef TARGET_UNIX
PBYTE to = FindWBMapping((BYTE *)pDesc + pDesc->m_funcStartOffset);
size_t barrierSize = pDesc->m_funcEndOffset - pDesc->m_funcStartOffset;
#else // TARGET_UNIX
PBYTE to = FindWBMapping(pDesc->m_pFuncStart);
size_t barrierSize = pDesc->m_pFuncEnd - pDesc->m_pFuncStart;
#endif // TARGET_UNIX
if(to)
{
to = (PBYTE)PCODEToPINSTR((PCODE)GetWriteBarrierCodeLocation(to));
ExecutableWriterHolderNoLog<BYTE> barrierWriterHolder;
if (IsWriteBarrierCopyEnabled())
{
barrierWriterHolder.AssignExecutableWriterHolder(to, barrierSize);
to = barrierWriterHolder.GetRW();
}
GWB_PATCH_OFFSET(g_lowest_address);
GWB_PATCH_OFFSET(g_highest_address);
GWB_PATCH_OFFSET(g_ephemeral_low);
GWB_PATCH_OFFSET(g_ephemeral_high);
GWB_PATCH_OFFSET(g_card_table);
}
pDesc++;
}
}
int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
{
// The runtime is not always suspended when this is called (unlike StompWriteBarrierEphemeral) but we have
// no way to update the barrier code atomically on ARM since each 32-bit value we change is loaded over
// two instructions. So we have to suspend the EE (which forces code out of the barrier functions) before
// proceeding. Luckily the case where the runtime is not already suspended is relatively rare (allocation
// of a new large object heap segment). Skip the suspend for the case where we're called during runtime
// startup.
// Suspending/resuming the EE under GC stress will trigger a GC, and if we're holding the
// GC lock due to allocating a LOH segment it will cause a deadlock, so disable it here.
GCStressPolicy::InhibitHolder iholder;
int stompWBCompleteActions = SWB_ICACHE_FLUSH;
if (!isRuntimeSuspended)
{
ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
stompWBCompleteActions |= SWB_EE_RESTART;
}
UpdateGCWriteBarriers(bReqUpperBoundsCheck);
return stompWBCompleteActions;
}
int StompWriteBarrierEphemeral(bool isRuntimeSuspended)
{
UNREFERENCED_PARAMETER(isRuntimeSuspended);
_ASSERTE(isRuntimeSuspended);
UpdateGCWriteBarriers();
return SWB_ICACHE_FLUSH;
}
void FlushWriteBarrierInstructionCache()
{
// We've changed code so we must flush the instruction cache.
BYTE *pbAlteredRange;
DWORD cbAlteredRange;
ComputeWriteBarrierRange(&pbAlteredRange, &cbAlteredRange);
FlushInstructionCache(GetCurrentProcess(), pbAlteredRange, cbAlteredRange);
}
#endif // !DACCESS_COMPILE
void LazyMachState::unwindLazyState(LazyMachState* baseState,
MachState* unwoundstate,
DWORD threadId,
int funCallDepth,
HostCallPreference hostCallPreference)
{
T_CONTEXT ctx;
T_KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs;
ctx.ContextFlags = 0; // Read by PAL_VirtualUnwind.
ctx.Pc = baseState->captureIp;
ctx.Sp = baseState->captureSp;
ctx.R4 = unwoundstate->captureR4_R11[0] = baseState->captureR4_R11[0];
ctx.R5 = unwoundstate->captureR4_R11[1] = baseState->captureR4_R11[1];
ctx.R6 = unwoundstate->captureR4_R11[2] = baseState->captureR4_R11[2];
ctx.R7 = unwoundstate->captureR4_R11[3] = baseState->captureR4_R11[3];
ctx.R8 = unwoundstate->captureR4_R11[4] = baseState->captureR4_R11[4];
ctx.R9 = unwoundstate->captureR4_R11[5] = baseState->captureR4_R11[5];
ctx.R10 = unwoundstate->captureR4_R11[6] = baseState->captureR4_R11[6];
ctx.R11 = unwoundstate->captureR4_R11[7] = baseState->captureR4_R11[7];
#if !defined(DACCESS_COMPILE)
// For DAC, if we get here, it means that the LazyMachState is uninitialized and we have to unwind it.
// The API we use to unwind in DAC is StackWalk64(), which does not support the context pointers.
//
// Restore the integer registers to KNONVOLATILE_CONTEXT_POINTERS to be used for unwinding.
nonVolRegPtrs.R4 = &unwoundstate->captureR4_R11[0];
nonVolRegPtrs.R5 = &unwoundstate->captureR4_R11[1];
nonVolRegPtrs.R6 = &unwoundstate->captureR4_R11[2];
nonVolRegPtrs.R7 = &unwoundstate->captureR4_R11[3];
nonVolRegPtrs.R8 = &unwoundstate->captureR4_R11[4];
nonVolRegPtrs.R9 = &unwoundstate->captureR4_R11[5];
nonVolRegPtrs.R10 = &unwoundstate->captureR4_R11[6];
nonVolRegPtrs.R11 = &unwoundstate->captureR4_R11[7];
#endif // DACCESS_COMPILE
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,sp:%p)\n", baseState->captureIp, baseState->captureSp));
PCODE pvControlPc;
do
{
#ifndef TARGET_UNIX
pvControlPc = Thread::VirtualUnwindCallFrame(&ctx, &nonVolRegPtrs);
#else // !TARGET_UNIX
#ifdef DACCESS_COMPILE
HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs);
if (FAILED(hr))
{
DacError(hr);
}
#else // DACCESS_COMPILE
BOOL success = PAL_VirtualUnwind(&ctx, &nonVolRegPtrs);
if (!success)
{
_ASSERTE(!"unwindLazyState: Unwinding failed");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
#endif // DACCESS_COMPILE
pvControlPc = GetIP(&ctx);
#endif // !TARGET_UNIX
if (funCallDepth > 0)
{
--funCallDepth;
if (funCallDepth == 0)
break;
}
else
{
// Determine whether the given IP resides in JITted code (IsManagedCode returns nonzero in that case).
// Use it now to see if we've unwound to managed code yet.
BOOL fFailedReaderLock = FALSE;
BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
if (fFailedReaderLock)
{
// We don't know if we would have been able to find a JIT
// manager, because we couldn't enter the reader lock without
// yielding (and our caller doesn't want us to yield). So abort
// now.
// Invalidate the lazyState we're returning, so the caller knows
// we aborted before we could fully unwind
unwoundstate->_isValid = false;
return;
}
if (fIsManagedCode)
break;
}
}
while(TRUE);
//
// Update unwoundState so that HelperMethodFrameRestoreState knows which
// registers have been potentially modified.
//
unwoundstate->_pc = ctx.Pc;
unwoundstate->_sp = ctx.Sp;
#ifdef DACCESS_COMPILE
// For DAC builds, we update the registers directly since we don't have context pointers
unwoundstate->captureR4_R11[0] = ctx.R4;
unwoundstate->captureR4_R11[1] = ctx.R5;
unwoundstate->captureR4_R11[2] = ctx.R6;
unwoundstate->captureR4_R11[3] = ctx.R7;
unwoundstate->captureR4_R11[4] = ctx.R8;
unwoundstate->captureR4_R11[5] = ctx.R9;
unwoundstate->captureR4_R11[6] = ctx.R10;
unwoundstate->captureR4_R11[7] = ctx.R11;
#else // !DACCESS_COMPILE
// For non-DAC builds, update the register state from context pointers
unwoundstate->_R4_R11[0] = (PDWORD)nonVolRegPtrs.R4;
unwoundstate->_R4_R11[1] = (PDWORD)nonVolRegPtrs.R5;
unwoundstate->_R4_R11[2] = (PDWORD)nonVolRegPtrs.R6;
unwoundstate->_R4_R11[3] = (PDWORD)nonVolRegPtrs.R7;
unwoundstate->_R4_R11[4] = (PDWORD)nonVolRegPtrs.R8;
unwoundstate->_R4_R11[5] = (PDWORD)nonVolRegPtrs.R9;
unwoundstate->_R4_R11[6] = (PDWORD)nonVolRegPtrs.R10;
unwoundstate->_R4_R11[7] = (PDWORD)nonVolRegPtrs.R11;
#endif // DACCESS_COMPILE
unwoundstate->_isValid = true;
}
void HelperMethodFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
//
// Copy the saved state from the frame to the current context.
//
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK HelperMethodFrame::UpdateRegDisplay cached ip:%p, sp:%p\n", m_MachState._pc, m_MachState._sp));
#if defined(DACCESS_COMPILE)
// For DAC, we may get here when the HMF is still uninitialized.
// So we may need to unwind here.
if (!m_MachState.isValid())
{
// This allocation throws on OOM.
MachState* pUnwoundState = (MachState*)DacAllocHostOnlyInstance(sizeof(*pUnwoundState), true);
InsureInit(false, pUnwoundState);
pRD->pCurrentContext->Pc = pRD->ControlPC = pUnwoundState->_pc;
pRD->pCurrentContext->Sp = pRD->SP = pUnwoundState->_sp;
pRD->pCurrentContext->R4 = (DWORD)(pUnwoundState->captureR4_R11[0]);
pRD->pCurrentContext->R5 = (DWORD)(pUnwoundState->captureR4_R11[1]);
pRD->pCurrentContext->R6 = (DWORD)(pUnwoundState->captureR4_R11[2]);
pRD->pCurrentContext->R7 = (DWORD)(pUnwoundState->captureR4_R11[3]);
pRD->pCurrentContext->R8 = (DWORD)(pUnwoundState->captureR4_R11[4]);
pRD->pCurrentContext->R9 = (DWORD)(pUnwoundState->captureR4_R11[5]);
pRD->pCurrentContext->R10 = (DWORD)(pUnwoundState->captureR4_R11[6]);
pRD->pCurrentContext->R11 = (DWORD)(pUnwoundState->captureR4_R11[7]);
return;
}
#endif // DACCESS_COMPILE
// reset pContext; it's only valid for active (top-most) frame
pRD->pContext = NULL;
pRD->ControlPC = GetReturnAddress();
pRD->SP = (DWORD)(size_t)m_MachState._sp;
pRD->pCurrentContext->Pc = pRD->ControlPC;
pRD->pCurrentContext->Sp = pRD->SP;
pRD->pCurrentContext->R4 = *m_MachState._R4_R11[0];
pRD->pCurrentContext->R5 = *m_MachState._R4_R11[1];
pRD->pCurrentContext->R6 = *m_MachState._R4_R11[2];
pRD->pCurrentContext->R7 = *m_MachState._R4_R11[3];
pRD->pCurrentContext->R8 = *m_MachState._R4_R11[4];
pRD->pCurrentContext->R9 = *m_MachState._R4_R11[5];
pRD->pCurrentContext->R10 = *m_MachState._R4_R11[6];
pRD->pCurrentContext->R11 = *m_MachState._R4_R11[7];
pRD->pCurrentContextPointers->R4 = m_MachState._R4_R11[0];
pRD->pCurrentContextPointers->R5 = m_MachState._R4_R11[1];
pRD->pCurrentContextPointers->R6 = m_MachState._R4_R11[2];
pRD->pCurrentContextPointers->R7 = m_MachState._R4_R11[3];
pRD->pCurrentContextPointers->R8 = m_MachState._R4_R11[4];
pRD->pCurrentContextPointers->R9 = m_MachState._R4_R11[5];
pRD->pCurrentContextPointers->R10 = m_MachState._R4_R11[6];
pRD->pCurrentContextPointers->R11 = m_MachState._R4_R11[7];
pRD->pCurrentContextPointers->Lr = NULL;
}
#ifndef DACCESS_COMPILE
void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
int n = 0;
m_rgCode[n++] = 0x4684; // mov r12, r0
m_rgCode[n++] = 0x4608; // mov r0, r1
m_rgCode[n++] = 0xea4f; // mov r1, r12
m_rgCode[n++] = 0x010c;
m_rgCode[n++] = 0xf8df; // ldr pc, [pc, #0]
m_rgCode[n++] = 0xf000;
_ASSERTE(n == ARRAY_SIZE(m_rgCode));
m_pTarget = GetPreStubEntryPoint();
m_pMethodDesc = (TADDR)pMD;
}
/*
Rough pseudo-code of interface dispatching:
// jitted code sets r0, r4:
r0 = object;
r4 = indirectionCell;
// jitted code calls *indirectionCell
switch (*indirectionCell)
{
case LookupHolder._stub:
// ResolveWorkerAsmStub:
*indirectionCell = DispatchHolder._stub;
call ResolveWorkerStatic, jump to target method;
case DispatchHolder._stub:
if (r0.methodTable == expectedMethodTable) jump to target method;
// ResolveHolder._stub._failEntryPoint:
jump to case ResolveHolder._stub._resolveEntryPoint;
case ResolveHolder._stub._resolveEntryPoint:
if (r0.methodTable in hashTable) jump to target method;
// ResolveHolder._stub._slowEntryPoint:
// ResolveWorkerChainLookupAsmStub:
// ResolveWorkerAsmStub:
if (_failEntryPoint called too many times) *indirectionCell = ResolveHolder._stub._resolveEntryPoint;
call ResolveWorkerStatic, jump to target method;
}
Note that ResolveWorkerChainLookupAsmStub currently points directly
to ResolveWorkerAsmStub; in the future, this could be separate.
*/
void LookupHolder::Initialize(LookupHolder* pLookupHolderRX, PCODE resolveWorkerTarget, size_t dispatchToken)
{
// Called directly by JITTED code
// See ResolveWorkerAsmStub
// ldr r12, [pc + 8] ; #_token
_stub._entryPoint[0] = 0xf8df;
_stub._entryPoint[1] = 0xc008;
// ldr pc, [pc] ; #_resolveWorkerTarget
_stub._entryPoint[2] = 0xf8df;
_stub._entryPoint[3] = 0xf000;
_stub._resolveWorkerTarget = resolveWorkerTarget;
_stub._token = dispatchToken;
_ASSERTE(4 == LookupStub::entryPointLen);
}
void DispatchHolder::Initialize(DispatchHolder* pDispatchHolderRX, PCODE implTarget, PCODE failTarget, size_t expectedMT)
{
// Called directly by JITTED code
// DispatchHolder._stub._entryPoint(r0:object, r1, r2, r3, r4:IndirectionCell)
// {
// if (r0.methodTable == this._expectedMT) (this._implTarget)(r0, r1, r2, r3);
// else (this._failTarget)(r0, r1, r2, r3, r4);
// }
int n = 0;
WORD offset;
// We rely on the stub entry-point being DWORD aligned (so we can tell whether any subsequent WORD is
// DWORD-aligned or not, which matters in the calculation of PC-relative offsets).
_ASSERTE(((UINT_PTR)_stub._entryPoint & 0x3) == 0);
// Compute a PC-relative offset for use in an instruction encoding. Must call this prior to emitting the
// instruction halfword to which it applies. For thumb-2 encodings the offset must be computed before emitting
// the first of the halfwords.
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(DispatchStub, _field) - ((offsetof(DispatchStub, _entryPoint) + sizeof(*DispatchStub::_entryPoint) * (n + 2)) & 0xfffffffc))
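// Note: the '(n + 2)' term models the Thumb PC reading 4 bytes (two halfwords) ahead of the current
// instruction, and the '& 0xfffffffc' models the Align(PC, 4) base used by Thumb PC-relative loads.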
// r0 : object. It can be null as well.
// When it is null the code causes an AV. The AV is seen by the VM's personality routine,
// which converts it into a NullReferenceException. We want the AV to happen before modifying the stack
// so that we can get the call stack in windbg at the point of the AV. Therefore "ldr r12, [r0]" should
// be the first instruction.
// ldr r12, [r0 + #Object.m_pMethTab]
_stub._entryPoint[n++] = DISPATCH_STUB_FIRST_WORD;
_stub._entryPoint[n++] = 0xc000;
// push {r5}
_stub._entryPoint[n++] = 0xb420;
// ldr r5, [pc + #_expectedMT]
offset = PC_REL_OFFSET(_expectedMT);
_ASSERTE((offset & 0x3) == 0);
_stub._entryPoint[n++] = 0x4d00 | (offset >> 2);
// cmp r5, r12
_stub._entryPoint[n++] = 0x4565;
// pop {r5}
_stub._entryPoint[n++] = 0xbc20;
// bne failTarget
_stub._entryPoint[n++] = 0xd101;
// ldr pc, [pc + #_implTarget]
offset = PC_REL_OFFSET(_implTarget);
_stub._entryPoint[n++] = 0xf8df;
_stub._entryPoint[n++] = 0xf000 | offset;
// failTarget:
// ldr pc, [pc + #_failTarget]
offset = PC_REL_OFFSET(_failTarget);
_stub._entryPoint[n++] = 0xf8df;
_stub._entryPoint[n++] = 0xf000 | offset;
// nop - insert padding
_stub._entryPoint[n++] = 0xbf00;
_ASSERTE(n == DispatchStub::entryPointLen);
// Make sure that the data members below are aligned
_ASSERTE((n & 1) == 0);
_stub._expectedMT = DWORD(expectedMT);
_stub._failTarget = failTarget;
_stub._implTarget = implTarget;
}
void ResolveHolder::Initialize(ResolveHolder* pResolveHolderRX,
PCODE resolveWorkerTarget, PCODE patcherTarget,
size_t dispatchToken, UINT32 hashedToken,
void * cacheAddr, INT32 * counterAddr)
{
// Called directly by JITTED code
// ResolveStub._resolveEntryPoint(r0:Object*, r1, r2, r3, r4:IndirectionCellAndFlags)
// {
// MethodTable mt = r0.m_pMethTab;
// int i = ((mt + mt >> 12) ^ this._hashedToken) & this._cacheMask
// ResolveCacheElem e = this._cacheAddress + i
// do
// {
// if (mt == e.pMT && this._token == e.token) (e.target)(r0, r1, r2, r3);
// e = e.pNext;
// } while (e != null)
// (this._slowEntryPoint)(r0, r1, r2, r3, r4);
// }
//
int n = 0;
WORD offset;
// We rely on the stub entry-point being DWORD aligned (so we can tell whether any subsequent WORD is
// DWORD-aligned or not, which matters in the calculation of PC-relative offsets).
_ASSERTE(((UINT_PTR)_stub._resolveEntryPoint & 0x3) == 0);
// Compute a PC-relative offset for use in an instruction encoding. Must call this prior to emitting the
// instruction halfword to which it applies. For thumb-2 encodings the offset must be computed before emitting
// the first of the halfwords.
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - ((offsetof(ResolveStub, _resolveEntryPoint) + sizeof(*ResolveStub::_resolveEntryPoint) * (n + 2)) & 0xfffffffc))
// ldr r12, [r0 + #Object.m_pMethTab]
_stub._resolveEntryPoint[n++] = RESOLVE_STUB_FIRST_WORD;
_stub._resolveEntryPoint[n++] = 0xc000;
// ;; We need two scratch registers, r5 and r6
// push {r5,r6}
_stub._resolveEntryPoint[n++] = 0xb460;
// ;; Compute i = ((mt + mt >> 12) ^ this._hashedToken) & this._cacheMask
// add r6, r12, r12 lsr #12
_stub._resolveEntryPoint[n++] = 0xeb0c;
_stub._resolveEntryPoint[n++] = 0x361c;
// ldr r5, [pc + #_hashedToken]
offset = PC_REL_OFFSET(_hashedToken);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
// eor r6, r6, r5
_stub._resolveEntryPoint[n++] = 0xea86;
_stub._resolveEntryPoint[n++] = 0x0605;
// ldr r5, [pc + #_cacheMask]
offset = PC_REL_OFFSET(_cacheMask);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
// and r6, r6, r5
_stub._resolveEntryPoint[n++] = 0xea06;
_stub._resolveEntryPoint[n++] = 0x0605;
// ;; ResolveCacheElem e = this._cacheAddress + i
// ldr r5, [pc + #_cacheAddress]
offset = PC_REL_OFFSET(_cacheAddress);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset >> 2);
// ldr r6, [r5 + r6] ;; r6 = e = this._cacheAddress + i
_stub._resolveEntryPoint[n++] = 0x59ae;
// ;; do {
int loop = n;
// ;; Check mt == e.pMT
// ldr r5, [r6 + #ResolveCacheElem.pMT]
offset = offsetof(ResolveCacheElem, pMT);
_ASSERTE(offset <= 124 && (offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x6835 | (offset<< 4);
// cmp r12, r5
_stub._resolveEntryPoint[n++] = 0x45ac;
// bne nextEntry
_stub._resolveEntryPoint[n++] = 0xd108;
// ;; Check this._token == e.token
// ldr r5, [pc + #_token]
offset = PC_REL_OFFSET(_token);
_ASSERTE((offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x4d00 | (offset>>2);
// ldr r12, [r6 + #ResolveCacheElem.token]
offset = offsetof(ResolveCacheElem, token);
_stub._resolveEntryPoint[n++] = 0xf8d6;
_stub._resolveEntryPoint[n++] = 0xc000 | offset;
// cmp r12, r5
_stub._resolveEntryPoint[n++] = 0x45ac;
// bne nextEntry
_stub._resolveEntryPoint[n++] = 0xd103;
// ldr r12, [r6 + #ResolveCacheElem.target] ;; r12 : e.target
offset = offsetof(ResolveCacheElem, target);
_stub._resolveEntryPoint[n++] = 0xf8d6;
_stub._resolveEntryPoint[n++] = 0xc000 | offset;
// ;; Restore r5 and r6
// pop {r5,r6}
_stub._resolveEntryPoint[n++] = 0xbc60;
// ;; Branch to e.target
// bx r12 ;; (e.target)(r0,r1,r2,r3)
_stub._resolveEntryPoint[n++] = 0x4760;
// nextEntry:
// ;; e = e.pNext;
// ldr r6, [r6 + #ResolveCacheElem.pNext]
offset = offsetof(ResolveCacheElem, pNext);
_ASSERTE(offset <=124 && (offset & 0x3) == 0);
_stub._resolveEntryPoint[n++] = 0x6836 | (offset << 4);
// ;; } while(e != null);
// cbz r6, slowEntryPoint
_stub._resolveEntryPoint[n++] = 0xb116;
// ldr r12, [r0 + #Object.m_pMethTab]
_stub._resolveEntryPoint[n++] = 0xf8d0;
_stub._resolveEntryPoint[n++] = 0xc000;
// b loop
offset = (WORD)((loop - (n + 2)) * sizeof(WORD));
offset = (offset >> 1) & 0x07ff;
_stub._resolveEntryPoint[n++] = 0xe000 | offset;
// slowEntryPoint:
// pop {r5,r6}
_stub._resolveEntryPoint[n++] = 0xbc60;
// nop for alignment
_stub._resolveEntryPoint[n++] = 0xbf00;
// The slow entry point must be DWORD-aligned (see the _ASSERTE below); insert nops as needed to achieve that.
// ARMSTUB TODO: promotion
// fall through to slow case
_ASSERTE(_stub._resolveEntryPoint + n == _stub._slowEntryPoint);
_ASSERTE(n == ResolveStub::resolveEntryPointLen);
// ResolveStub._slowEntryPoint(r0:MethodToken, r1, r2, r3, r4:IndirectionCellAndFlags)
// {
// r12 = this._tokenSlow;
// this._resolveWorkerTarget(r0, r1, r2, r3, r4, r12);
// }
// The following macro relies on this entry point being DWORD-aligned. We've already asserted that the
// overall stub is aligned above, just need to check that the preceding stubs occupy an even number of
// WORD slots.
_ASSERTE((n & 1) == 0);
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - ((offsetof(ResolveStub, _slowEntryPoint) + sizeof(*ResolveStub::_slowEntryPoint) * (n + 2)) & 0xfffffffc))
n = 0;
// ldr r12, [pc + #_tokenSlow]
offset = PC_REL_OFFSET(_tokenSlow);
_stub._slowEntryPoint[n++] = 0xf8df;
_stub._slowEntryPoint[n++] = 0xc000 | offset;
// ldr pc, [pc + #_resolveWorkerTarget]
offset = PC_REL_OFFSET(_resolveWorkerTarget);
_stub._slowEntryPoint[n++] = 0xf8df;
_stub._slowEntryPoint[n++] = 0xf000 | offset;
_ASSERTE(n == ResolveStub::slowEntryPointLen);
// ResolveStub._failEntryPoint(r0:MethodToken, r1, r2, r3, r4:IndirectionCellAndFlags)
// {
// if(--*(this._pCounter) < 0) r4 = r4 | SDF_ResolveBackPatch;
// this._resolveEntryPoint(r0, r1, r2, r3, r4);
// }
// The following macro relies on this entry point being DWORD-aligned. We've already asserted that the
// overall stub is aligned above, just need to check that the preceding stubs occupy an even number of
// WORD slots.
_ASSERTE((n & 1) == 0);
#undef PC_REL_OFFSET
#define PC_REL_OFFSET(_field) (WORD)(offsetof(ResolveStub, _field) - ((offsetof(ResolveStub, _failEntryPoint) + sizeof(*ResolveStub::_failEntryPoint) * (n + 2)) & 0xfffffffc))
n = 0;
// push {r5}
_stub._failEntryPoint[n++] = 0xb420;
// ldr r5, [pc + #_pCounter]
offset = PC_REL_OFFSET(_pCounter);
_ASSERTE((offset & 0x3) == 0);
_stub._failEntryPoint[n++] = 0x4d00 | (offset >> 2);
// ldr r12, [r5]
_stub._failEntryPoint[n++] = 0xf8d5;
_stub._failEntryPoint[n++] = 0xc000;
// subs r12, r12, #1
_stub._failEntryPoint[n++] = 0xf1bc;
_stub._failEntryPoint[n++] = 0x0c01;
// str r12, [r5]
_stub._failEntryPoint[n++] = 0xf8c5;
_stub._failEntryPoint[n++] = 0xc000;
// pop {r5}
_stub._failEntryPoint[n++] = 0xbc20;
// bge resolveEntryPoint
_stub._failEntryPoint[n++] = 0xda01;
// or r4, r4, SDF_ResolveBackPatch
_ASSERTE(SDF_ResolveBackPatch < 256);
_stub._failEntryPoint[n++] = 0xf044;
_stub._failEntryPoint[n++] = 0x0400 | SDF_ResolveBackPatch;
// resolveEntryPoint:
// b _resolveEntryPoint
offset = (WORD)(offsetof(ResolveStub, _resolveEntryPoint) - (offsetof(ResolveStub, _failEntryPoint) + sizeof(*ResolveStub::_failEntryPoint) * (n + 2)));
_ASSERTE((offset & 1) == 0);
offset = (offset >> 1) & 0x07ff;
_stub._failEntryPoint[n++] = 0xe000 | offset;
// nop for alignment
_stub._failEntryPoint[n++] = 0xbf00;
_ASSERTE(n == ResolveStub::failEntryPointLen);
_stub._pCounter = counterAddr;
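// Note: the hashed token and _cacheMask are both pre-scaled by the pointer size (see below) so the
// masked hash can be used directly as a byte offset into the resolve cache.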
_stub._hashedToken = hashedToken << LOG2_PTRSIZE;
_stub._cacheAddress = (size_t) cacheAddr;
_stub._token = dispatchToken;
_stub._tokenSlow = dispatchToken;
_stub._resolveWorkerTarget = resolveWorkerTarget;
_stub._cacheMask = CALL_STUB_CACHE_MASK * sizeof(void*);
_ASSERTE(resolveWorkerTarget == (PCODE)ResolveWorkerChainLookupAsmStub);
_ASSERTE(patcherTarget == NULL);
}
Stub *GenerateInitPInvokeFrameHelper()
{
CONTRACT(Stub*)
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
CPUSTUBLINKER sl;
CPUSTUBLINKER *psl = &sl;
CORINFO_EE_INFO::InlinedCallFrameInfo FrameInfo;
InlinedCallFrame::GetEEInfo(&FrameInfo);
// R4 contains address of the frame on stack (the frame ptr, not its neg space)
unsigned negSpace = FrameInfo.offsetOfFrameVptr;
ThumbReg regFrame = ThumbReg(4);
ThumbReg regThread = ThumbReg(5);
ThumbReg regScratch = ThumbReg(6);
ThumbReg regR9 = ThumbReg(9);
#ifdef TARGET_UNIX
// Erect frame to perform call to GetThread
psl->ThumbEmitProlog(1, sizeof(ArgumentRegisters), FALSE); // Save r4 for aligned stack
// Save the argument registers around the GetThread call. Don't bother with ldm/stm since this is an inefficient path anyway.
for (int reg = 0; reg < 4; reg++)
psl->ThumbEmitStoreRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r) + sizeof(*ArgumentRegisters::r) * reg);
#endif
psl->ThumbEmitGetThread(regThread);
#ifdef TARGET_UNIX
for (int reg = 0; reg < 4; reg++)
psl->ThumbEmitLoadRegIndirect(ThumbReg(reg), thumbRegSp, offsetof(ArgumentRegisters, r) + sizeof(*ArgumentRegisters::r) * reg);
#endif
// mov [regFrame + FrameInfo.offsetOfGSCookie], GetProcessGSCookie()
psl->ThumbEmitMovConstant(regScratch, GetProcessGSCookie());
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfGSCookie - negSpace);
// mov [regFrame + FrameInfo.offsetOfFrameVptr], InlinedCallFrame::GetMethodFrameVPtr()
psl->ThumbEmitMovConstant(regScratch, InlinedCallFrame::GetMethodFrameVPtr());
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfFrameVptr - negSpace);
// ldr regScratch, [regThread + offsetof(Thread, m_pFrame)]
// str regScratch, [regFrame + FrameInfo.offsetOfFrameLink]
psl->ThumbEmitLoadRegIndirect(regScratch, regThread, offsetof(Thread, m_pFrame));
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfFrameLink - negSpace);
// str FP, [regFrame + FrameInfo.offsetOfCalleeSavedFP]
psl->ThumbEmitStoreRegIndirect(thumbRegFp, regFrame, FrameInfo.offsetOfCalleeSavedFP - negSpace);
// str R9, [regFrame + FrameInfo.offsetOfSPAfterProlog]
psl->ThumbEmitStoreRegIndirect(regR9, regFrame, FrameInfo.offsetOfSPAfterProlog - negSpace);
// mov [regFrame + FrameInfo.offsetOfReturnAddress], 0
psl->ThumbEmitMovConstant(regScratch, 0);
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfReturnAddress - negSpace);
#ifdef TARGET_UNIX
DWORD cbSavedRegs = sizeof(ArgumentRegisters) + 2 * 4; // r0-r3, r4, lr
psl->ThumbEmitAdd(regScratch, thumbRegSp, cbSavedRegs);
psl->ThumbEmitStoreRegIndirect(regScratch, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace);
#else
// str SP, [regFrame + FrameInfo.offsetOfCallSiteSP]
psl->ThumbEmitStoreRegIndirect(thumbRegSp, regFrame, FrameInfo.offsetOfCallSiteSP - negSpace);
#endif
// mov [regThread + offsetof(Thread, m_pFrame)], regFrame
psl->ThumbEmitStoreRegIndirect(regFrame, regThread, offsetof(Thread, m_pFrame));
// leave current Thread in R4
#ifdef TARGET_UNIX
psl->ThumbEmitEpilog();
#else
// Return. The return address has been restored into LR at this point.
// bx lr
psl->ThumbEmitJumpRegister(thumbRegLr);
#endif
// A single process-wide stub that will never unload
RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
}
void StubLinkerCPU::ThumbEmitGetThread(ThumbReg dest)
{
#ifdef TARGET_UNIX
ThumbEmitMovConstant(ThumbReg(0), (TADDR)GetThreadHelper);
ThumbEmitCallRegister(ThumbReg(0));
if (dest != ThumbReg(0))
{
ThumbEmitMovRegReg(dest, ThumbReg(0));
}
#else // TARGET_UNIX
// mrc p15, 0, dest, c13, c0, 2
Emit16(0xee1d);
Emit16((WORD)(0x0f50 | (dest << 12)));
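// dest now holds the TEB. Walk TEB->ThreadLocalStoragePointer[_tls_index] to this module's thread-local
// block, then load the current Thread* cached in gCurrentThreadInfo.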
ThumbEmitLoadRegIndirect(dest, dest, offsetof(TEB, ThreadLocalStoragePointer));
ThumbEmitLoadRegIndirect(dest, dest, sizeof(void *) * _tls_index);
ThumbEmitLoadRegIndirect(dest, dest, (int)Thread::GetOffsetOfThreadStatic(&gCurrentThreadInfo));
#endif // TARGET_UNIX
}
// Emits code to adjust for a static delegate target.
VOID StubLinkerCPU::EmitShuffleThunk(ShuffleEntry *pShuffleEntryArray)
{
// Scan the shuffle entries to see if there are any stack-to-stack operations. If there aren't, we can emit a
// much simpler thunk (simply because we generate code that doesn't require more than one scratch
// register).
bool fSimpleCase = true;
ShuffleEntry *pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
// It's enough to check whether we have a destination stack location (there are no register to stack
// scenarios).
if (!(pEntry->dstofs & ShuffleEntry::REGMASK))
{
fSimpleCase = false;
break;
}
pEntry++;
}
if (fSimpleCase)
{
// No real prolog for the simple case, we're a tail call so we shouldn't be on the stack for any walk
// or unwind.
// On entry r0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
// field and stash it in r12.
// ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
// Emit the instructions to rewrite the argument registers. Most will be register-to-register (e.g.
// move r1 to r0) but one or two of them might move values from the top of the incoming stack
// arguments into registers r2 and r3. Note that the entries are ordered so that we don't need to
// worry about a move overwriting a register we'll need to use as input for the next move (i.e. we get
// move r1 to r0, move r2 to r1 etc.).
pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
_ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
if (pEntry->srcofs & ShuffleEntry::REGMASK)
{
// Move from register case.
ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
}
else
{
// Move from the stack case.
// ldr <dest>, [sp + #source_offset]
ThumbEmitLoadRegIndirect(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
thumbRegSp,
(pEntry->srcofs & ShuffleEntry::OFSMASK) * 4);
}
pEntry++;
}
// Tail call to real target.
// bx r12
ThumbEmitJumpRegister(ThumbReg(12));
return;
}
// In the more complex case we need to re-write at least some of the arguments on the stack as well as
// argument registers. We need some temporary registers to perform stack-to-stack copies and we've
// reserved our one remaining volatile register, r12, to store the eventual target method address. So
// we're going to generate a hybrid-tail call. Using a tail call has the advantage that we don't need to
// erect and link an explicit CLR frame to enable crawling of this thunk. Additionally re-writing the
// stack can be more performant in some scenarios than copying the stack (in the presence of floating point
// or arguments requiring 64-bit alignment we might not have to move some or even most of the values).
// The hybrid nature is that we'll erect a standard native frame (with a proper prolog and epilog) so we
// can save some non-volatile registers to act as temporaries. Once we've performed the stack re-write
// we'll poke the saved LR value (which will become a PC value on the pop in the epilog) to return to the
// target method instead of us, thus atomically removing our frame from the stack and tail-calling the
// real target.
// Prolog:
ThumbEmitProlog(3, // Save r4-r6,lr (count doesn't include lr)
0, // No additional space in the stack frame required
FALSE); // Don't push argument registers
// On entry r0 holds the delegate instance. Look up the real target address stored in the MethodPtrAux
// field and stash it in r12.
// ldr r12, [r0, #offsetof(DelegateObject, _methodPtrAux)]
ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(0), DelegateObject::GetOffsetOfMethodPtrAux());
// As we copy slots from lower in the argument stack to higher we need to keep track of source and
// destination pointers into those arguments (if we just use offsets from SP we get into trouble with
// argument frames larger than 4K). We'll use r4 to track the source (original location of an argument
// from the caller's perspective) and r5 to track the destination (new location of the argument from the
// callee's perspective). Both start at the current value of SP plus the offset created by pushing our
// stack frame in the prolog.
// add r4, sp, #cbSavedRegs
// add r5, sp, #cbSavedRegs
DWORD cbSavedRegs = 4 * 4; // r4, r5, r6, lr
ThumbEmitAdd(ThumbReg(4), thumbRegSp, cbSavedRegs);
ThumbEmitAdd(ThumbReg(5), thumbRegSp, cbSavedRegs);
// Follow the shuffle array instructions to re-write some subset of r0-r3 and the stacked arguments to
// remove the unwanted delegate instance in r0. Arguments only ever move from higher registers to lower
// registers or higher stack addresses to lower stack addresses and are ordered from lowest register to
// highest stack address. As a result we can do all updates in order and in place and we'll never
// overwrite a register or stack location needed as a source value in a later iteration.
DWORD dwLastSrcIndex = (DWORD)-1;
DWORD dwLastDstIndex = (DWORD)-1;
pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
// If this is a register-to-register move we can do it in one instruction.
if ((pEntry->srcofs & ShuffleEntry::REGMASK) && (pEntry->dstofs & ShuffleEntry::REGMASK))
{
ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
}
else
{
// There is no case where a source argument register is moved into a destination stack slot.
_ASSERTE((pEntry->srcofs & ShuffleEntry::REGMASK) == 0);
// Source or destination stack offsets might not be contiguous (though they often will be).
// Floating point arguments and 64-bit aligned values can cause discontinuities. While we copy
// values we'll use post-increment addressing modes to move both source and destination stack
// pointers forward 4 bytes at a time (the common case). But we'll insert additional add
// instructions for any holes we find (we detect these by remembering the last source and
// destination stack offset we used).
// Add any additional offset to the source pointer (r4) to account for holes in the copy.
DWORD dwSrcIndex = pEntry->srcofs & ShuffleEntry::OFSMASK;
if (dwSrcIndex != (dwLastSrcIndex + 1))
{
// If the gap is at the very beginning, then dwLastSrcIndex is still -1, so we need to allow
// for that. Note that the calculation below handles this properly, due to DWORD wrapping.
_ASSERTE((dwLastSrcIndex == (DWORD)-1) || (dwSrcIndex > dwLastSrcIndex));
// add r4, #gap_size
ThumbEmitIncrement(ThumbReg(4), (dwSrcIndex - dwLastSrcIndex - 1) * 4);
}
dwLastSrcIndex = dwSrcIndex;
// Load the source value from the stack and increment our source pointer (r4) in one instruction.
// If the target is a register we can move the value directly there. Otherwise we move it to the
// r6 temporary register.
if (pEntry->dstofs & ShuffleEntry::REGMASK)
{
// ldr <regnum>, [r4], #4
ThumbEmitLoadIndirectPostIncrement(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK), ThumbReg(4), 4);
}
else
{
// ldr r6, [r4], #4
ThumbEmitLoadIndirectPostIncrement(ThumbReg(6), ThumbReg(4), 4);
// Add any additional offset to the destination pointer (r5) to account for holes in the copy.
DWORD dwDstIndex = pEntry->dstofs & ShuffleEntry::OFSMASK;
if (dwDstIndex != (dwLastDstIndex + 1))
{
// If the gap is at the very beginning, then dwLastDstIndex is still -1, so we need to allow
// for that. Note that the calculation below handles this properly, due to DWORD wrapping.
_ASSERTE((dwLastDstIndex == (DWORD)-1) || (dwDstIndex > dwLastDstIndex));
// add r5, #gap_size
ThumbEmitIncrement(ThumbReg(5), (dwDstIndex - dwLastDstIndex - 1) * 4);
}
dwLastDstIndex = dwDstIndex;
// Write the value in r6 to its final home on the stack and increment our destination pointer
// (r5).
// str r6, [r5], #4
ThumbEmitStoreIndirectPostIncrement(ThumbReg(6), ThumbReg(5), 4);
}
}
pEntry++;
}
// Arguments are copied. Now we modify the saved value of LR we created in our prolog (which will be
// popped back off into PC in our epilog) so that it points to the real target address in r12 rather than
// our return address. We haven't modified LR ourselves, so the net result is that executing our epilog
// will pop our frame and tail call to the real method.
// str r12, [sp + #(cbSavedRegs-4)]
ThumbEmitStoreRegIndirect(ThumbReg(12), thumbRegSp, cbSavedRegs - 4);
// Epilog:
ThumbEmitEpilog();
}
void StubLinkerCPU::ThumbEmitTailCallManagedMethod(MethodDesc *pMD)
{
// Use direct call if possible.
if (pMD->HasStableEntryPoint())
{
// mov r12, #entry_point
ThumbEmitMovConstant(ThumbReg(12), (TADDR)pMD->GetStableEntryPoint());
}
else
{
// mov r12, #slotaddress
ThumbEmitMovConstant(ThumbReg(12), (TADDR)pMD->GetAddrOfSlot());
// ldr r12, [r12]
ThumbEmitLoadRegIndirect(ThumbReg(12), ThumbReg(12), 0);
}
// bx r12
ThumbEmitJumpRegister(ThumbReg(12));
}
VOID StubLinkerCPU::EmitComputedInstantiatingMethodStub(MethodDesc* pSharedMD, struct ShuffleEntry *pShuffleEntryArray, void* extraArg)
{
STANDARD_VM_CONTRACT;
struct ShuffleEntry *pEntry = pShuffleEntryArray;
while (pEntry->srcofs != ShuffleEntry::SENTINEL)
{
_ASSERTE(pEntry->dstofs & ShuffleEntry::REGMASK);
_ASSERTE(pEntry->srcofs & ShuffleEntry::REGMASK);
_ASSERTE(!(pEntry->dstofs & ShuffleEntry::FPREGMASK));
_ASSERTE(!(pEntry->srcofs & ShuffleEntry::FPREGMASK));
_ASSERTE(pEntry->dstofs != ShuffleEntry::HELPERREG);
_ASSERTE(pEntry->srcofs != ShuffleEntry::HELPERREG);
ThumbEmitMovRegReg(ThumbReg(pEntry->dstofs & ShuffleEntry::OFSMASK),
ThumbReg(pEntry->srcofs & ShuffleEntry::OFSMASK));
pEntry++;
}
MetaSig msig(pSharedMD);
ArgIterator argit(&msig);
if (argit.HasParamType())
{
// Place instantiation parameter into the correct register.
ArgLocDesc sInstArgLoc;
argit.GetParamTypeLoc(&sInstArgLoc);
int regHidden = sInstArgLoc.m_idxGenReg;
_ASSERTE(regHidden != -1);
if (extraArg == NULL)
{
if (pSharedMD->RequiresInstMethodTableArg())
{
// Unboxing stub case
// Extract MethodTable pointer (the hidden arg) from the object instance.
// ldr regHidden, [r0]
ThumbEmitLoadRegIndirect(ThumbReg(regHidden), ThumbReg(0), 0);
}
}
else
{
// mov regHidden, #pHiddenArg
ThumbEmitMovConstant(ThumbReg(regHidden), (TADDR)extraArg);
}
}
if (extraArg == NULL)
{
// Unboxing stub case
// Skip over the MethodTable* to find the address of the unboxed value type.
// add r0, #sizeof(MethodTable*)
ThumbEmitIncrement(ThumbReg(0), sizeof(MethodTable*));
}
ThumbEmitTailCallManagedMethod(pSharedMD);
SetTargetMethod(pSharedMD);
}
#endif // !DACCESS_COMPILE
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
{
return EXCEPTION_CONTINUE_SEARCH;
}
void UpdateRegDisplayFromCalleeSavedRegisters(REGDISPLAY * pRD, CalleeSavedRegisters * pRegs)
{
LIMITED_METHOD_CONTRACT;
T_CONTEXT * pContext = pRD->pCurrentContext;
pContext->R4 = pRegs->r4;
pContext->R5 = pRegs->r5;
pContext->R6 = pRegs->r6;
pContext->R7 = pRegs->r7;
pContext->R8 = pRegs->r8;
pContext->R9 = pRegs->r9;
pContext->R10 = pRegs->r10;
pContext->R11 = pRegs->r11;
pContext->Lr = pRegs->r14;
T_KNONVOLATILE_CONTEXT_POINTERS * pContextPointers = pRD->pCurrentContextPointers;
pRD->pCurrentContextPointers->R4 = (PDWORD)&pRegs->r4;
pRD->pCurrentContextPointers->R5 = (PDWORD)&pRegs->r5;
pRD->pCurrentContextPointers->R6 = (PDWORD)&pRegs->r6;
pRD->pCurrentContextPointers->R7 = (PDWORD)&pRegs->r7;
pRD->pCurrentContextPointers->R8 = (PDWORD)&pRegs->r8;
pRD->pCurrentContextPointers->R9 = (PDWORD)&pRegs->r9;
pRD->pCurrentContextPointers->R10 = (PDWORD)&pRegs->r10;
pRD->pCurrentContextPointers->R11 = (PDWORD)&pRegs->r11;
pRD->pCurrentContextPointers->Lr = NULL;
}
void TransitionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
// Copy the saved argument registers into the current context
ArgumentRegisters * pArgRegs = GetArgumentRegisters();
pRD->pCurrentContext->R0 = pArgRegs->r[0];
pRD->pCurrentContext->R1 = pArgRegs->r[1];
pRD->pCurrentContext->R2 = pArgRegs->r[2];
pRD->pCurrentContext->R3 = pArgRegs->r[3];
// Next, copy all the callee saved registers
UpdateRegDisplayFromCalleeSavedRegisters(pRD, GetCalleeSavedRegisters());
// Set ControlPC to be the same as the saved "return address"
// value, which is actually a ControlPC in the frameless method (e.g.
// faulting address in case of AV or TAE).
pRD->pCurrentContext->Pc = GetReturnAddress();
// Set the caller SP
pRD->pCurrentContext->Sp = this->GetSP();
// Finally, syncup the regdisplay with the context
SyncRegDisplayToCurrentContext(pRD);
LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK TransitionFrame::UpdateRegDisplay(rip:%p, rsp:%p)\n", pRD->ControlPC, pRD->SP));
}
void FaultingExceptionFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
LIMITED_METHOD_DAC_CONTRACT;
// Copy the context to regdisplay
memcpy(pRD->pCurrentContext, &m_ctx, sizeof(T_CONTEXT));
pRD->ControlPC = ::GetIP(&m_ctx);
pRD->SP = ::GetSP(&m_ctx);
// Update the integer registers in KNONVOLATILE_CONTEXT_POINTERS from
// the exception context we have.
pRD->pCurrentContextPointers->R4 = (PDWORD)&m_ctx.R4;
pRD->pCurrentContextPointers->R5 = (PDWORD)&m_ctx.R5;
pRD->pCurrentContextPointers->R6 = (PDWORD)&m_ctx.R6;
pRD->pCurrentContextPointers->R7 = (PDWORD)&m_ctx.R7;
pRD->pCurrentContextPointers->R8 = (PDWORD)&m_ctx.R8;
pRD->pCurrentContextPointers->R9 = (PDWORD)&m_ctx.R9;
pRD->pCurrentContextPointers->R10 = (PDWORD)&m_ctx.R10;
pRD->pCurrentContextPointers->R11 = (PDWORD)&m_ctx.R11;
pRD->pCurrentContextPointers->Lr = NULL;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
}
void InlinedCallFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACT_VOID
{
NOTHROW;
GC_NOTRIGGER;
// We should skip over InlinedCallFrame if it is not active.
// It will be part of a JITed method's frame, and the stack-walker
// can handle such a case.
#ifdef PROFILING_SUPPORTED
PRECONDITION(CORProfilerStackSnapshotEnabled() || InlinedCallFrame::FrameHasActiveCall(this));
#endif
HOST_NOCALLS;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACT_END;
// @TODO: Remove this after the debugger is fixed to avoid stack-walks from bad places
// @TODO: This may be still needed for sampling profilers
if (!InlinedCallFrame::FrameHasActiveCall(this))
{
LOG((LF_CORDB, LL_ERROR, "WARNING: InlinedCallFrame::UpdateRegDisplay called on inactive frame %p\n", this));
return;
}
// reset pContext; it's only valid for active (top-most) frame
pRD->pContext = NULL;
*(pRD->pPC) = m_pCallerReturnAddress;
pRD->SP = (DWORD) dac_cast<TADDR>(m_pCallSiteSP);
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
pRD->pCurrentContext->Pc = *(pRD->pPC);
pRD->pCurrentContext->Sp = pRD->SP;
// Update the frame pointer in the current context.
pRD->pCurrentContext->R11 = m_pCalleeSavedFP;
pRD->pCurrentContextPointers->R11 = &m_pCalleeSavedFP;
// This is necessary to unwind methods with alloca. This needs to stay
// in sync with definition of REG_SAVED_LOCALLOC_SP in the JIT.
pRD->pCurrentContext->R9 = (DWORD) dac_cast<TADDR>(m_pSPAfterProlog);
pRD->pCurrentContextPointers->R9 = (DWORD *)&m_pSPAfterProlog;
RETURN;
}
#ifdef FEATURE_HIJACK
TADDR ResumableFrame::GetReturnAddressPtr(void)
{
LIMITED_METHOD_DAC_CONTRACT;
return dac_cast<TADDR>(m_Regs) + offsetof(T_CONTEXT, Pc);
}
void ResumableFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACT_VOID
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACT_END;
CopyMemory(pRD->pCurrentContext, m_Regs, sizeof(T_CONTEXT));
pRD->ControlPC = m_Regs->Pc;
pRD->SP = m_Regs->Sp;
pRD->pCurrentContextPointers->R4 = &m_Regs->R4;
pRD->pCurrentContextPointers->R5 = &m_Regs->R5;
pRD->pCurrentContextPointers->R6 = &m_Regs->R6;
pRD->pCurrentContextPointers->R7 = &m_Regs->R7;
pRD->pCurrentContextPointers->R8 = &m_Regs->R8;
pRD->pCurrentContextPointers->R9 = &m_Regs->R9;
pRD->pCurrentContextPointers->R10 = &m_Regs->R10;
pRD->pCurrentContextPointers->R11 = &m_Regs->R11;
pRD->pCurrentContextPointers->Lr = &m_Regs->Lr;
pRD->volatileCurrContextPointers.R0 = &m_Regs->R0;
pRD->volatileCurrContextPointers.R1 = &m_Regs->R1;
pRD->volatileCurrContextPointers.R2 = &m_Regs->R2;
pRD->volatileCurrContextPointers.R3 = &m_Regs->R3;
pRD->volatileCurrContextPointers.R12 = &m_Regs->R12;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
}
void HijackFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
CONTRACTL_END;
pRD->IsCallerContextValid = FALSE;
pRD->IsCallerSPValid = FALSE;
pRD->pCurrentContext->Pc = m_ReturnAddress;
pRD->pCurrentContext->Sp = PTR_TO_TADDR(m_Args) + sizeof(struct HijackArgs);
pRD->pCurrentContext->R0 = m_Args->R0;
pRD->pCurrentContext->R4 = m_Args->R4;
pRD->pCurrentContext->R5 = m_Args->R5;
pRD->pCurrentContext->R6 = m_Args->R6;
pRD->pCurrentContext->R7 = m_Args->R7;
pRD->pCurrentContext->R8 = m_Args->R8;
pRD->pCurrentContext->R9 = m_Args->R9;
pRD->pCurrentContext->R10 = m_Args->R10;
pRD->pCurrentContext->R11 = m_Args->R11;
pRD->pCurrentContextPointers->R4 = &m_Args->R4;
pRD->pCurrentContextPointers->R5 = &m_Args->R5;
pRD->pCurrentContextPointers->R6 = &m_Args->R6;
pRD->pCurrentContextPointers->R7 = &m_Args->R7;
pRD->pCurrentContextPointers->R8 = &m_Args->R8;
pRD->pCurrentContextPointers->R9 = &m_Args->R9;
pRD->pCurrentContextPointers->R10 = &m_Args->R10;
pRD->pCurrentContextPointers->R11 = &m_Args->R11;
pRD->pCurrentContextPointers->Lr = NULL;
SyncRegDisplayToCurrentContext(pRD);
}
#endif // FEATURE_HIJACK
class UMEntryThunk * UMEntryThunk::Decode(void *pCallback)
{
_ASSERTE(offsetof(UMEntryThunkCode, m_code) == 0);
UMEntryThunkCode * pCode = (UMEntryThunkCode*)((ULONG_PTR)pCallback & ~THUMB_CODE);
// We may be called with an unmanaged external code pointer instead. So if it doesn't look like one of our
// stubs (see UMEntryThunkCode::Encode below) then we'll return NULL. Luckily in these scenarios our
// caller will perform a hash lookup on successful return to verify our result in case random unmanaged
// code happens to look like ours.
if ((pCode->m_code[0] == 0xf8df) &&
(pCode->m_code[1] == 0xc008) &&
(pCode->m_code[2] == 0xf8df) &&
(pCode->m_code[3] == 0xf000))
{
return (UMEntryThunk*)pCode->m_pvSecretParam;
}
return NULL;
}
void UMEntryThunkCode::Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam)
{
// ldr r12, [pc + 8]
m_code[0] = 0xf8df;
m_code[1] = 0xc008;
// ldr pc, [pc]
m_code[2] = 0xf8df;
m_code[3] = 0xf000;
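// The two pc-relative loads above read the pointer-sized fields that follow this code: r12 receives
// m_pvSecretParam (the UMEntryThunk, see Decode above) and the final load jumps through m_pTargetCode.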
m_pTargetCode = (TADDR)pTargetCode;
m_pvSecretParam = (TADDR)pvSecretParam;
FlushInstructionCache(GetCurrentProcess(),&pEntryThunkCodeRX->m_code,sizeof(m_code));
}
#ifndef DACCESS_COMPILE
void UMEntryThunkCode::Poison()
{
ExecutableWriterHolder<UMEntryThunkCode> thunkWriterHolder(this, sizeof(UMEntryThunkCode));
UMEntryThunkCode *pThisRW = thunkWriterHolder.GetRW();
pThisRW->m_pTargetCode = (TADDR)UMEntryThunk::ReportViolation;
// ldr r0, [pc + 8]
pThisRW->m_code[0] = 0x4802;
// nop
pThisRW->m_code[1] = 0xbf00;
ClrFlushInstructionCache(&m_code,sizeof(m_code));
}
#endif // DACCESS_COMPILE
///////////////////////////// UNIMPLEMENTED //////////////////////////////////
#ifndef DACCESS_COMPILE
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
void InitJITHelpers1()
{
STANDARD_VM_CONTRACT;
// Allocation helpers, faster but non-logging.
if (!(TrackAllocationsEnabled()
|| LoggingOn(LF_GCALLOC, LL_INFO10)
#ifdef _DEBUG
|| (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
#endif // _DEBUG
))
{
_ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);
SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_FastPortable);
SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP_FastPortable);
ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateString_MP_FastPortable), ECall::FastAllocateString);
}
}
VOID ResetCurrentContext()
{
LIMITED_METHOD_CONTRACT;
}
#endif // !DACCESS_COMPILE
#ifdef FEATURE_COMINTEROP
void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
// mov r12, pc
// ldr pc, [pc, #0]
// dcd 0
// dcd target
WORD rgCode[] = {
0x46fc,
0xf8df, 0xf004
};
BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
memcpy(pBufferRW, rgCode, sizeof(rgCode));
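// The target address is stored two bytes past the code so that it is 4-byte aligned; the pc-relative
// load above picks it up (the Thumb PC is the instruction address plus 4, rounded down to a 4-byte boundary).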
*((PCODE*)(pBufferRW + sizeof(rgCode) + 2)) = target;
// Ensure that the updated instructions get actually written
ClrFlushInstructionCache(pBufferRX, COMMETHOD_CALL_PRESTUB_SIZE);
_ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
*((PCODE*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == target);
}
#endif // FEATURE_COMINTEROP
void MovRegImm(BYTE* p, int reg, TADDR imm)
{
LIMITED_METHOD_CONTRACT;
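// Emit a movw/movt pair for the given register (0xF240/0xF2C0 are the Thumb-2 MOVW/MOVT templates)
// and let PutThumb2Mov32 scatter the 32-bit immediate into the encodings.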
*(WORD *)(p + 0) = 0xF240;
*(WORD *)(p + 2) = (UINT16)(reg << 8);
*(WORD *)(p + 4) = 0xF2C0;
*(WORD *)(p + 6) = (UINT16)(reg << 8);
PutThumb2Mov32((UINT16 *)p, imm);
}
#ifndef DACCESS_COMPILE
#ifdef FEATURE_READYTORUN
//
// Allocation of dynamic helpers
//
#define DYNAMIC_HELPER_ALIGNMENT sizeof(TADDR)
#define BEGIN_DYNAMIC_HELPER_EMIT(size) \
SIZE_T cb = size; \
SIZE_T cbAligned = ALIGN_UP(cb, DYNAMIC_HELPER_ALIGNMENT); \
BYTE * pStartRX = (BYTE *)(void*)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(cbAligned, DYNAMIC_HELPER_ALIGNMENT); \
ExecutableWriterHolder<BYTE> startWriterHolder(pStartRX, cbAligned); \
BYTE * pStart = startWriterHolder.GetRW(); \
size_t rxOffset = pStartRX - pStart; \
BYTE * p = pStart;
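// END_DYNAMIC_HELPER_EMIT pads the unused tail of the allocation with 0xdefe (a Thumb UDF encoding,
// so stray execution faults) and then flushes the instruction cache for the newly emitted code.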
#define END_DYNAMIC_HELPER_EMIT() \
_ASSERTE(pStart + cb == p); \
while (p < pStart + cbAligned) { *(WORD *)p = 0xdefe; p += 2; } \
ClrFlushInstructionCache(pStartRX, cbAligned); \
return (PCODE)((TADDR)pStartRX | THUMB_CODE)
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
STANDARD_VM_CONTRACT;
BEGIN_DYNAMIC_HELPER_EMIT(18);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
void DynamicHelpers::EmitHelperWithArg(BYTE*& p, size_t rxOffset, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
// mov r1, arg
MovRegImm(p, 1, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
}
PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
EmitHelperWithArg(p, rxOffset, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(26);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// mov r1, arg2
MovRegImm(p, 1, arg2);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelperArgMove(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(20);
// mov r1, r0
*(WORD *)p = 0x4601;
p += 2;
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateReturn(LoaderAllocator * pAllocator)
{
BEGIN_DYNAMIC_HELPER_EMIT(2);
*(WORD *)p = 0x4770; // bx lr
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateReturnConst(LoaderAllocator * pAllocator, TADDR arg)
{
BEGIN_DYNAMIC_HELPER_EMIT(10);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// bx lr
*(WORD *)p = 0x4770;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateReturnIndirConst(LoaderAllocator * pAllocator, TADDR arg, INT8 offset)
{
BEGIN_DYNAMIC_HELPER_EMIT((offset != 0) ? 16 : 12);
// mov r0, arg
MovRegImm(p, 0, arg);
p += 8;
// ldr r0, [r0]
*(WORD *)p = 0x6800;
p += 2;
if (offset != 0)
{
// add r0, r0, <offset>
*(WORD *)(p + 0) = 0xF100;
*(WORD *)(p + 2) = offset;
p += 4;
}
// bx lr
*(WORD *)p = 0x4770;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
// mov r2, arg
MovRegImm(p, 2, arg);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateHelperWithTwoArgs(LoaderAllocator * pAllocator, TADDR arg, TADDR arg2, PCODE target)
{
BEGIN_DYNAMIC_HELPER_EMIT(26);
// mov r2, arg
MovRegImm(p, 2, arg);
p += 8;
// mov r3, arg2
MovRegImm(p, 3, arg2);
p += 8;
// mov r12, target
MovRegImm(p, 12, target);
p += 8;
// bx r12
*(WORD *)p = 0x4760;
p += 2;
END_DYNAMIC_HELPER_EMIT();
}
PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator, CORINFO_RUNTIME_LOOKUP * pLookup, DWORD dictionaryIndexAndSlot, Module * pModule)
{
STANDARD_VM_CONTRACT;
PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ?
GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) :
GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
ExecutableWriterHolder<GenericHandleArgs> argsWriterHolder(pArgs, sizeof(GenericHandleArgs));
argsWriterHolder.GetRW()->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
argsWriterHolder.GetRW()->signature = pLookup->signature;
argsWriterHolder.GetRW()->module = (CORINFO_MODULE_HANDLE)pModule;
WORD slotOffset = (WORD)(dictionaryIndexAndSlot & 0xFFFF) * sizeof(Dictionary*);
// The result is available only via the run-time helper function.
if (pLookup->indirections == CORINFO_USEHELPER)
{
BEGIN_DYNAMIC_HELPER_EMIT(18);
EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
END_DYNAMIC_HELPER_EMIT();
}
else
{
int indirectionsSize = 0;
if (pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
{
indirectionsSize += (pLookup->sizeOffset >= 0xFFF ? 10 : 4);
indirectionsSize += 12;
}
for (WORD i = 0; i < pLookup->indirections; i++)
{
indirectionsSize += (pLookup->offsets[i] >= 0xFFF ? 10 : 4);
}
int codeSize = indirectionsSize + (pLookup->testForNull ? 26 : 2);
BEGIN_DYNAMIC_HELPER_EMIT(codeSize);
if (pLookup->testForNull)
{
// mov r3, r0
*(WORD *)p = 0x4603;
p += 2;
}
BYTE* pBLECall = NULL;
for (WORD i = 0; i < pLookup->indirections; i++)
{
if (i == pLookup->indirections - 1 && pLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
{
_ASSERTE(pLookup->testForNull && i > 0);
if (pLookup->sizeOffset >= 0xFFF)
{
// mov r2, offset
MovRegImm(p, 2, pLookup->sizeOffset); p += 8;
// ldr r1, [r0, r2]
*(WORD*)p = 0x5881; p += 2;
}
else
{
// ldr r1, [r0 + offset]
*(WORD*)p = 0xF8D0; p += 2;
*(WORD*)p = (WORD)(0xFFF & pLookup->sizeOffset) | 0x1000; p += 2;
}
// mov r2, slotOffset
MovRegImm(p, 2, slotOffset); p += 8;
// cmp r1,r2
*(WORD*)p = 0x4291; p += 2;
// ble 'CALL HELPER'
pBLECall = p; // Offset filled later
*(WORD*)p = 0xdd00; p += 2;
}
if (pLookup->offsets[i] >= 0xFFF)
{
// mov r2, offset
MovRegImm(p, 2, pLookup->offsets[i]);
p += 8;
// ldr r0, [r0, r2]
*(WORD *)p = 0x5880;
p += 2;
}
else
{
// ldr r0, [r0 + offset]
*(WORD *)p = 0xF8D0;
p += 2;
*(WORD *)p = (WORD)(0xFFF & pLookup->offsets[i]);
p += 2;
}
}
// No null test required
if (!pLookup->testForNull)
{
_ASSERTE(pLookup->sizeOffset == CORINFO_NO_SIZE_CHECK);
// mov pc, lr
*(WORD *)p = 0x46F7;
p += 2;
}
else
{
// cbz r0, 'CALL HELPER'
*(WORD *)p = 0xB100;
p += 2;
// mov pc, lr
*(WORD *)p = 0x46F7;
p += 2;
// CALL HELPER:
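// Back-patch the forward 'ble' emitted above with the distance to this point, converted to half-words
// and adjusted for the Thumb pc-plus-4 bias.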
if (pBLECall != NULL)
*(WORD*)pBLECall |= (((BYTE)(p - pBLECall) - 4) >> 1);
// mov r0, r3
*(WORD *)p = 0x4618;
p += 2;
EmitHelperWithArg(p, rxOffset, pAllocator, (TADDR)pArgs, helperAddress);
}
END_DYNAMIC_HELPER_EMIT();
}
}
#endif // FEATURE_READYTORUN
#endif // !DACCESS_COMPILE
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/vm/nativelibrarynative.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: NativeLibraryNative.cpp
//
#include "common.h"
#include "nativelibrary.h"
#include "nativelibrarynative.h"
// static
extern "C" INT_PTR QCALLTYPE NativeLibrary_LoadFromPath(LPCWSTR path, BOOL throwOnError)
{
QCALL_CONTRACT;
NATIVE_LIBRARY_HANDLE handle = nullptr;
BEGIN_QCALL;
handle = NativeLibrary::LoadLibraryFromPath(path, throwOnError);
END_QCALL;
return reinterpret_cast<INT_PTR>(handle);
}
// static
extern "C" INT_PTR QCALLTYPE NativeLibrary_LoadByName(LPCWSTR name, QCall::AssemblyHandle callingAssembly,
BOOL hasDllImportSearchPathFlag, DWORD dllImportSearchPathFlag,
BOOL throwOnError)
{
QCALL_CONTRACT;
NATIVE_LIBRARY_HANDLE handle = nullptr;
Assembly *pAssembly = callingAssembly->GetAssembly();
BEGIN_QCALL;
handle = NativeLibrary::LoadLibraryByName(name, pAssembly, hasDllImportSearchPathFlag, dllImportSearchPathFlag, throwOnError);
END_QCALL;
return reinterpret_cast<INT_PTR>(handle);
}
// static
extern "C" void QCALLTYPE NativeLibrary_FreeLib(INT_PTR handle)
{
QCALL_CONTRACT;
BEGIN_QCALL;
NativeLibrary::FreeNativeLibrary((NATIVE_LIBRARY_HANDLE) handle);
END_QCALL;
}
//static
extern "C" INT_PTR QCALLTYPE NativeLibrary_GetSymbol(INT_PTR handle, LPCWSTR symbolName, BOOL throwOnError)
{
QCALL_CONTRACT;
INT_PTR address = NULL;
BEGIN_QCALL;
address = NativeLibrary::GetNativeLibraryExport((NATIVE_LIBRARY_HANDLE)handle, symbolName, throwOnError);
END_QCALL;
return address;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: NativeLibraryNative.cpp
//
#include "common.h"
#include "nativelibrary.h"
#include "nativelibrarynative.h"
// static
extern "C" INT_PTR QCALLTYPE NativeLibrary_LoadFromPath(LPCWSTR path, BOOL throwOnError)
{
QCALL_CONTRACT;
NATIVE_LIBRARY_HANDLE handle = nullptr;
BEGIN_QCALL;
handle = NativeLibrary::LoadLibraryFromPath(path, throwOnError);
END_QCALL;
return reinterpret_cast<INT_PTR>(handle);
}
// static
extern "C" INT_PTR QCALLTYPE NativeLibrary_LoadByName(LPCWSTR name, QCall::AssemblyHandle callingAssembly,
BOOL hasDllImportSearchPathFlag, DWORD dllImportSearchPathFlag,
BOOL throwOnError)
{
QCALL_CONTRACT;
NATIVE_LIBRARY_HANDLE handle = nullptr;
Assembly *pAssembly = callingAssembly->GetAssembly();
BEGIN_QCALL;
handle = NativeLibrary::LoadLibraryByName(name, pAssembly, hasDllImportSearchPathFlag, dllImportSearchPathFlag, throwOnError);
END_QCALL;
return reinterpret_cast<INT_PTR>(handle);
}
// static
extern "C" void QCALLTYPE NativeLibrary_FreeLib(INT_PTR handle)
{
QCALL_CONTRACT;
BEGIN_QCALL;
NativeLibrary::FreeNativeLibrary((NATIVE_LIBRARY_HANDLE) handle);
END_QCALL;
}
//static
extern "C" INT_PTR QCALLTYPE NativeLibrary_GetSymbol(INT_PTR handle, LPCWSTR symbolName, BOOL throwOnError)
{
QCALL_CONTRACT;
INT_PTR address = NULL;
BEGIN_QCALL;
address = NativeLibrary::GetNativeLibraryExport((NATIVE_LIBRARY_HANDLE)handle, symbolName, throwOnError);
END_QCALL;
return address;
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/debug/di/rsmda.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: RsMda.cpp
//
// Manage Debug Assistant support in the Right-Side
//
//*****************************************************************************
#include "stdafx.h"
#include "winbase.h"
#include "corpriv.h"
//-----------------------------------------------------------------------------
// Cordb MDA notification
//-----------------------------------------------------------------------------
CordbMDA::CordbMDA(CordbProcess * pProc, DebuggerMDANotification * pData)
: CordbBase(pProc, 0, enumCordbMDA)
{
_ASSERTE(pData != NULL);
// The owning parent process should add us to the process's neuter list.
// Pick up ownership of strings
m_szName = pData->szName.TransferStringData();
m_szDescription = pData->szDescription.TransferStringData();
m_szXml = pData->szXml.TransferStringData();
m_dwOSTID = pData->dwOSThreadId;
m_flags = pData->flags;
}
//-----------------------------------------------------------------------------
// Destructor for CordbMDA object. Not much to do here since neutering should
// have taken care of it all.
//-----------------------------------------------------------------------------
CordbMDA::~CordbMDA()
{
// Strings protected w/ holders that will automatically free them.
_ASSERTE(IsNeutered());
}
//-----------------------------------------------------------------------------
// Neuter the CordbMDA object.
//-----------------------------------------------------------------------------
void CordbMDA::Neuter()
{
// Release buffers. Once we're neutered, these can no longer be accessed anyways,
// so may as well free them now.
// This is being done under the process-lock, and our accessors are also done
// under that lock, so we don't have to worry about any races here. :)
m_szName.Clear();
m_szDescription.Clear();
m_szXml.Clear();
CordbBase::Neuter();
};
//-----------------------------------------------------------------------------
// Implement IUnknown::QueryInterface.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::QueryInterface(REFIID riid, void **ppInterface)
{
if (riid == IID_ICorDebugMDA)
*ppInterface = static_cast<ICorDebugMDA*>(this);
else if (riid == IID_IUnknown)
*ppInterface = static_cast<IUnknown*>(static_cast<ICorDebugMDA*>(this));
else
{
*ppInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// Helper to marshal a string object out through the ICorDebug interfaces
// *GetName() functions using the common triple design pattern.
//
// parameters:
// pInputString - the string that we want to marshal out via the triple
// cchName, pcchName, szName - triple used to marshal out a string.
// Same usage as CordbModule::GetName and other string getters on the API.
//
// *pcchName is always set to the length of pInputString (including NULL). This lets
// callers know the full size of buffer they'd need to allocate to get the full string.
//
// if (cchName == 0) then we're in "query" mode:
// szName must be null. pcchName must be non-null and this function will just set
// *pcchName to let the caller know how large of a buffer to allocate.
// if (cchName != 0) then we copy as much as can fit into szName. We will always
// null terminate szName.
// pcchName can be null. If it's non-null, we set it.
//
//
// Expected usage is that caller calls us twice, once in query mode to allocate
// buffer, then a 2nd time to fill the buffer.
//
// Returns: S_OK on success.
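//
// Illustrative usage of the two-call pattern (sketch only, not part of the original source):
// ULONG32 cchNeeded = 0;
// CopyOutString(pInput, 0, &cchNeeded, NULL); // query: receive required size, incl. terminator
// WCHAR * pBuf = new WCHAR[cchNeeded];
// CopyOutString(pInput, cchNeeded, &cchNeeded, pBuf); // fill: copies and null terminates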
//-----------------------------------------------------------------------------
HRESULT CopyOutString(LPCWSTR pInputString, ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
_ASSERTE(pInputString != NULL);
ULONG32 len = (ULONG32) wcslen(pInputString) + 1;
if (cchName == 0)
{
// Query length
if ((szName != NULL) || (pcchName == NULL))
{
return E_INVALIDARG;
}
*pcchName = len;
return S_OK;
}
else
{
// Get data
if (szName == NULL)
{
return E_INVALIDARG;
}
// Just copy whatever we can fit into the buffer. If we truncate, that's ok.
// This will also guarantee that we null terminate.
wcsncpy_s(szName, cchName, pInputString, _TRUNCATE);
if (pcchName != 0)
{
*pcchName = len;
}
return S_OK;
}
}
//-----------------------------------------------------------------------------
// Get the string for the type of the MDA. Never empty.
// This is a convenient performant alternative to getting the XML stream and extracting
// the type from that based off the schema.
// See CopyOutString for parameter details.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetName(ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Get a string description of the MDA. This may be empty (0-length).
// See CopyOutString for parameter details.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetDescription(ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Get the full associated XML for the MDA. This may be empty.
// This could be a potentially expensive operation if the xml stream is large.
// See the MDA documentation for the schema for this XML stream.
// See CopyOutString for parameter details.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetXML(ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Get flags for this MDA object.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetFlags(CorDebugMDAFlags * pFlags)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Thread that the MDA is fired on. We use the os tid instead of an ICDThread in case an MDA is fired on a
// native thread (or a managed thread that hasn't yet entered managed code and so we don't have a ICDThread
// object for it yet)
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetOSThreadId(DWORD * pOsTid)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// File: RsMda.cpp
//
// Manage Debug Assistant support in the Right-Side
//
//*****************************************************************************
#include "stdafx.h"
#include "winbase.h"
#include "corpriv.h"
//-----------------------------------------------------------------------------
// Cordb MDA notification
//-----------------------------------------------------------------------------
CordbMDA::CordbMDA(CordbProcess * pProc, DebuggerMDANotification * pData)
: CordbBase(pProc, 0, enumCordbMDA)
{
_ASSERTE(pData != NULL);
// The owning parent process should add us to the process's neuter list.
// Pick up ownership of strings
m_szName = pData->szName.TransferStringData();
m_szDescription = pData->szDescription.TransferStringData();
m_szXml = pData->szXml.TransferStringData();
m_dwOSTID = pData->dwOSThreadId;
m_flags = pData->flags;
}
//-----------------------------------------------------------------------------
// Destructor for CordbMDA object. Not much to do here since neutering should
// have taken care of it all.
//-----------------------------------------------------------------------------
CordbMDA::~CordbMDA()
{
// Strings protected w/ holders that will automatically free them.
_ASSERTE(IsNeutered());
}
//-----------------------------------------------------------------------------
// Neuter the CordbMDA object.
//-----------------------------------------------------------------------------
void CordbMDA::Neuter()
{
// Release buffers. Once we're neutered, these can no longer be accessed anyways,
// so may as well free them now.
// This is being done under the process-lock, and our accessors are also done
// under that lock, so we don't have to worry about any races here. :)
m_szName.Clear();
m_szDescription.Clear();
m_szXml.Clear();
CordbBase::Neuter();
};
//-----------------------------------------------------------------------------
// Implement IUnknown::QueryInterface.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::QueryInterface(REFIID riid, void **ppInterface)
{
if (riid == IID_ICorDebugMDA)
*ppInterface = static_cast<ICorDebugMDA*>(this);
else if (riid == IID_IUnknown)
*ppInterface = static_cast<IUnknown*>(static_cast<ICorDebugMDA*>(this));
else
{
*ppInterface = NULL;
return E_NOINTERFACE;
}
ExternalAddRef();
return S_OK;
}
//-----------------------------------------------------------------------------
// Helper to marshal a string object out through the ICorDebug interfaces
// *GetName() functions using the common triple design pattern.
//
// parameters:
// pInputString - the string that we want to marshal out via the triple
// cchName, pcchName, szName - triple used to marshal out a string.
// Same usage as CordbModule::GetName and other string getters on the API.
//
// *pcchName is always set to the length of pInputString (including NULL). This lets
// callers know the full size of buffer they'd need to allocate to get the full string.
//
// if (cchName == 0) then we're in "query" mode:
// szName must be null. pcchName must be non-null and this function will just set
// *pcchName to let the caller know how large of a buffer to allocate.
// if (cchName != 0) then we copy as much as can fit into szName. We will always
// null terminate szName.
// pcchName can be null. If it's non-null, we set it.
//
//
// Expected usage is that caller calls us twice, once in query mode to allocate
// buffer, then a 2nd time to fill the buffer.
//
// Returns: S_OK on success.
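//
// Illustrative usage of the two-call pattern (sketch only, not part of the original source):
// ULONG32 cchNeeded = 0;
// CopyOutString(pInput, 0, &cchNeeded, NULL); // query: receive required size, incl. terminator
// WCHAR * pBuf = new WCHAR[cchNeeded];
// CopyOutString(pInput, cchNeeded, &cchNeeded, pBuf); // fill: copies and null terminates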
//-----------------------------------------------------------------------------
HRESULT CopyOutString(LPCWSTR pInputString, ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
_ASSERTE(pInputString != NULL);
ULONG32 len = (ULONG32) wcslen(pInputString) + 1;
if (cchName == 0)
{
// Query length
if ((szName != NULL) || (pcchName == NULL))
{
return E_INVALIDARG;
}
*pcchName = len;
return S_OK;
}
else
{
// Get data
if (szName == NULL)
{
return E_INVALIDARG;
}
// Just copy whatever we can fit into the buffer. If we truncate, that's ok.
// This will also guarantee that we null terminate.
wcsncpy_s(szName, cchName, pInputString, _TRUNCATE);
if (pcchName != 0)
{
*pcchName = len;
}
return S_OK;
}
}
//-----------------------------------------------------------------------------
// Get the string for the type of the MDA. Never empty.
// This is a convenient performant alternative to getting the XML stream and extracting
// the type from that based off the schema.
// See CopyOutString for parameter details.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetName(ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Get a string description of the MDA. This may be empty (0-length).
// See CopyOutString for parameter details.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetDescription(ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Get the full associated XML for the MDA. This may be empty.
// This could be a potentially expensive operation if the xml stream is large.
// See the MDA documentation for the schema for this XML stream.
// See CopyOutString for parameter details.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetXML(ULONG32 cchName, ULONG32 * pcchName, _Out_writes_to_opt_(cchName, *pcchName) WCHAR szName[])
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Get flags for this MDA object.
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetFlags(CorDebugMDAFlags * pFlags)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
//-----------------------------------------------------------------------------
// Thread that the MDA is fired on. We use the os tid instead of an ICDThread in case an MDA is fired on a
// native thread (or a managed thread that hasn't yet entered managed code and so we don't have a ICDThread
// object for it yet)
//-----------------------------------------------------------------------------
HRESULT CordbMDA::GetOSThreadId(DWORD * pOsTid)
{
HRESULT hr = S_OK;
PUBLIC_API_BEGIN(this)
{
hr = E_NOTIMPL;
}
PUBLIC_API_END(hr);
return hr;
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes; these end up being locations, always (well -- there is one case where that's not true: such a node can end up under a return of a small struct, later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. | ./src/coreclr/ilasm/assem.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: assem.cpp
//
//
// COM+ IL assembler
//
#include "ilasmpch.h"
#define INITGUID
#define DECLARE_DATA
#include "assembler.h"
void indexKeywords(Indx* indx); // defined in asmparse.y
unsigned int g_uCodePage = CP_ACP;
unsigned int g_uConsoleCP = CP_ACP;
char g_szSourceFileName[MAX_FILENAME_LENGTH*3];
WCHAR wzUniBuf[dwUniBuf]; // Unicode conversion global buffer
Assembler::Assembler()
{
m_pDisp = NULL;
m_pEmitter = NULL;
m_pImporter = NULL;
char* pszFQN = new char[16];
strcpy_s(pszFQN,16,"<Module>");
m_pModuleClass = new Class(pszFQN);
m_lstClass.PUSH(m_pModuleClass);
m_hshClass.PUSH(m_pModuleClass);
m_pModuleClass->m_cl = mdTokenNil;
m_pModuleClass->m_bIsMaster = FALSE;
m_fStdMapping = FALSE;
m_fDisplayTraceOutput= FALSE;
m_fTolerateDupMethods = FALSE;
m_pCurOutputPos = NULL;
m_CurPC = 0; // PC offset in method
m_pCurMethod = NULL;
m_pCurClass = NULL;
m_pCurEvent = NULL;
m_pCurProp = NULL;
m_wzMetadataVersion = NULL;
m_wMSVmajor = 0xFFFF;
m_wMSVminor = 0xFFFF;
m_wSSVersionMajor = 4;
m_wSSVersionMinor = 0;
m_fAppContainer = FALSE;
m_fHighEntropyVA = FALSE;
m_pCeeFileGen = NULL;
m_pCeeFile = 0;
m_pManifest = NULL;
m_pCustomDescrList = NULL;
m_pGlobalDataSection = NULL;
m_pILSection = NULL;
m_pTLSSection = NULL;
m_fDidCoInitialise = FALSE;
m_fDLL = FALSE;
m_fEntryPointPresent = FALSE;
m_fHaveFieldsWithRvas = FALSE;
m_fFoldCode = FALSE;
m_dwMethodsFolded = 0;
m_szScopeName[0] = 0;
m_crExtends = mdTypeDefNil;
m_nImplList = 0;
m_TyParList = NULL;
m_SEHD = NULL;
m_firstArgName = NULL;
m_lastArgName = NULL;
m_szNamespace = new char[2];
m_szNamespace[0] = 0;
m_NSstack.PUSH(m_szNamespace);
m_szFullNS = new char[MAX_NAMESPACE_LENGTH];
memset(m_szFullNS,0,MAX_NAMESPACE_LENGTH);
m_ulFullNSLen = MAX_NAMESPACE_LENGTH;
m_State = STATE_OK;
m_fInitialisedMetaData = FALSE;
m_fAutoInheritFromObject = TRUE;
m_ulLastDebugLine = 0xFFFFFFFF;
m_ulLastDebugColumn = 0xFFFFFFFF;
m_ulLastDebugLineEnd = 0xFFFFFFFF;
m_ulLastDebugColumnEnd = 0xFFFFFFFF;
m_dwIncludeDebugInfo = 0;
m_fGeneratePDB = FALSE;
m_fIsMscorlib = FALSE;
m_fOptimize = FALSE;
m_tkSysObject = 0;
m_tkSysString = 0;
m_tkSysValue = 0;
m_tkSysEnum = 0;
m_pVTable = NULL;
m_pMarshal = NULL;
m_pPInvoke = NULL;
m_fReportProgress = TRUE;
m_tkCurrentCVOwner = 1; // module
m_pOutputBuffer = NULL;
m_dwSubsystem = (DWORD)-1;
m_dwComImageFlags = COMIMAGE_FLAGS_ILONLY;
m_dwFileAlignment = 0;
m_stBaseAddress = 0;
m_stSizeOfStackReserve = 0;
m_dwCeeFileFlags = ICEE_CREATE_FILE_PURE_IL;
g_szSourceFileName[0] = 0;
m_guidLang = CorSym_LanguageType_ILAssembly;
m_guidLangVendor = CorSym_LanguageVendor_Microsoft;
m_guidDoc = CorSym_DocumentType_Text;
for(int i=0; i<INSTR_POOL_SIZE; i++) m_Instr[i].opcode = -1;
m_wzResourceFile = NULL;
m_wzKeySourceName = NULL;
OnErrGo = false;
bClock = NULL;
m_pbsMD = NULL;
m_pOutputBuffer = new BYTE[OUTPUT_BUFFER_SIZE];
m_pCurOutputPos = m_pOutputBuffer;
m_pEndOutputPos = m_pOutputBuffer + OUTPUT_BUFFER_SIZE;
m_crImplList = new mdTypeRef[MAX_INTERFACES_IMPLEMENTED];
m_nImplListSize = MAX_INTERFACES_IMPLEMENTED;
m_pManifest = new AsmMan((void*)this);
dummyClass = new Class(NULL);
indexKeywords(&indxKeywords);
m_pPortablePdbWriter = NULL;
}
Assembler::~Assembler()
{
if(m_pbsMD) delete m_pbsMD;
if(m_pMarshal) delete m_pMarshal;
if(m_pManifest) delete m_pManifest;
if(m_pPInvoke) delete m_pPInvoke;
if(m_pVTable) delete m_pVTable;
m_lstGlobalLabel.RESET(true);
m_lstGlobalFixup.RESET(true);
m_hshClass.RESET(false);
m_lstClass.RESET(true);
while((m_ClassStack.POP()));
while(m_CustomDescrListStack.POP());
m_pCurClass = NULL;
dummyClass->m_szFQN = NULL;
delete dummyClass;
if (m_pOutputBuffer) delete [] m_pOutputBuffer;
if (m_crImplList) delete [] m_crImplList;
if (m_TyParList) delete m_TyParList;
if (m_pCeeFileGen != NULL) {
if (m_pCeeFile)
m_pCeeFileGen->DestroyCeeFile(&m_pCeeFile);
DestroyICeeFileGen(&m_pCeeFileGen);
m_pCeeFileGen = NULL;
}
while((m_szNamespace = m_NSstack.POP())) ;
delete [] m_szFullNS;
m_MethodBodyList.RESET(true);
m_TypeDefDList.RESET(true);
if (m_pImporter != NULL)
{
m_pImporter->Release();
m_pImporter = NULL;
}
if (m_pEmitter != NULL)
{
m_pEmitter->Release();
m_pEmitter = NULL;
}
if (m_pPortablePdbWriter != NULL)
{
delete m_pPortablePdbWriter;
m_pPortablePdbWriter = NULL;
}
if (m_pDisp != NULL)
{
m_pDisp->Release();
m_pDisp = NULL;
}
}
BOOL Assembler::Init(BOOL generatePdb)
{
if (m_pCeeFileGen != NULL) {
if (m_pCeeFile)
m_pCeeFileGen->DestroyCeeFile(&m_pCeeFile);
DestroyICeeFileGen(&m_pCeeFileGen);
m_pCeeFileGen = NULL;
}
if (FAILED(CreateICeeFileGen(&m_pCeeFileGen))) return FALSE;
if (FAILED(m_pCeeFileGen->CreateCeeFileEx(&m_pCeeFile,(ULONG)m_dwCeeFileFlags))) return FALSE;
if (FAILED(m_pCeeFileGen->GetSectionCreate(m_pCeeFile, ".il", sdReadOnly, &m_pILSection))) return FALSE;
if (FAILED(m_pCeeFileGen->GetSectionCreate (m_pCeeFile, ".sdata", sdReadWrite, &m_pGlobalDataSection))) return FALSE;
if (FAILED(m_pCeeFileGen->GetSectionCreate (m_pCeeFile, ".tls", sdReadWrite, &m_pTLSSection))) return FALSE;
m_fGeneratePDB = generatePdb;
return TRUE;
}
void Assembler::SetDLL(BOOL IsDll)
{
HRESULT OK;
OK = m_pCeeFileGen->SetDllSwitch(m_pCeeFile, IsDll);
_ASSERTE(SUCCEEDED(OK));
m_fDLL = IsDll;
}
void Assembler::ResetArgNameList()
{
if(m_firstArgName) delArgNameList(m_firstArgName);
m_firstArgName = NULL;
m_lastArgName = NULL;
}
void Assembler::ResetForNextMethod()
{
ResetArgNameList();
m_CurPC = 0;
m_pCurOutputPos = m_pOutputBuffer;
m_State = STATE_OK;
m_pCurMethod = NULL;
}
void Assembler::ResetLineNumbers()
{
// reset line number information
m_ulLastDebugLine = 0xFFFFFFFF;
m_ulLastDebugColumn = 0xFFFFFFFF;
m_ulLastDebugLineEnd = 0xFFFFFFFF;
m_ulLastDebugColumnEnd = 0xFFFFFFFF;
}
BOOL Assembler::AddMethod(Method *pMethod)
{
BOOL fIsInterface=FALSE, fIsImport=FALSE;
ULONG PEFileOffset=0;
_ASSERTE(m_pCeeFileGen != NULL);
if (pMethod == NULL)
{
report->error("pMethod == NULL");
return FALSE;
}
if(pMethod->m_pClass != NULL)
{
fIsInterface = IsTdInterface(pMethod->m_pClass->m_Attr);
fIsImport = IsTdImport(pMethod->m_pClass->m_Attr);
}
if(m_CurPC)
{
char sz[1024];
sz[0] = 0;
if(fIsImport) strcat_s(sz,1024," imported");
if(IsMdAbstract(pMethod->m_Attr)) strcat_s(sz,1024," abstract");
if(IsMdPinvokeImpl(pMethod->m_Attr)) strcat_s(sz,1024," pinvoke");
if(!IsMiIL(pMethod->m_wImplAttr)) strcat_s(sz,1024," non-IL");
if(IsMiRuntime(pMethod->m_wImplAttr)) strcat_s(sz,1024," runtime-supplied");
if(IsMiInternalCall(pMethod->m_wImplAttr)) strcat_s(sz,1024," an internal call");
if(strlen(sz))
{
report->error("Method cannot have body if it is%s\n",sz);
}
}
else // method has no body
{
if(fIsImport || IsMdAbstract(pMethod->m_Attr) || IsMdPinvokeImpl(pMethod->m_Attr)
|| IsMiRuntime(pMethod->m_wImplAttr) || IsMiInternalCall(pMethod->m_wImplAttr)) return TRUE;
if(OnErrGo)
{
report->error("Method has no body\n");
return TRUE;
}
else
{
report->warn("Method has no body, 'ret' emitted\n");
Instr* pIns = GetInstr();
if(pIns)
{
memset(pIns,0,sizeof(Instr));
pIns->opcode = CEE_RET;
EmitOpcode(pIns);
}
}
}
if(pMethod->m_Locals.COUNT()) pMethod->m_LocalsSig=0x11000001; // placeholder, the real token 2b defined in EmitMethod
COR_ILMETHOD_FAT fatHeader;
fatHeader.SetFlags(pMethod->m_Flags);
fatHeader.SetMaxStack(pMethod->m_MaxStack);
fatHeader.SetLocalVarSigTok(pMethod->m_LocalsSig);
fatHeader.SetCodeSize(m_CurPC);
bool moreSections = (pMethod->m_dwNumExceptions != 0);
// if max stack is specified <8, force fat header, otherwise (with tiny header) it will default to 8
if((fatHeader.GetMaxStack() < 8)&&(fatHeader.GetLocalVarSigTok()==0)&&(fatHeader.GetCodeSize()<64)&&(!moreSections))
fatHeader.SetFlags(fatHeader.GetFlags() | CorILMethod_InitLocals); //forces fat header but does nothing else, since LocalVarSigTok==0
unsigned codeSize = m_CurPC;
unsigned codeSizeAligned = codeSize;
if (moreSections)
        codeSizeAligned = (codeSizeAligned + 3) & ~3; // to ensure EH section aligned
unsigned headerSize = COR_ILMETHOD::Size(&fatHeader, moreSections);
unsigned ehSize = COR_ILMETHOD_SECT_EH::Size(pMethod->m_dwNumExceptions, pMethod->m_ExceptionList);
unsigned totalSize = headerSize + codeSizeAligned + ehSize;
BYTE* outBuff;
BYTE* endbuf;
BinStr* pbsBody;
if((pbsBody = new BinStr())==NULL) return FALSE;
if((outBuff = pbsBody->getBuff(totalSize))==NULL) return FALSE;
endbuf = &outBuff[totalSize];
// Emit the header
outBuff += COR_ILMETHOD::Emit(headerSize, &fatHeader, moreSections, outBuff);
pMethod->m_pCode = outBuff;
pMethod->m_headerOffset= PEFileOffset;
pMethod->m_methodOffset= PEFileOffset + headerSize;
pMethod->m_CodeSize = codeSize;
// Emit the code
if (codeSizeAligned)
{
memset(outBuff,0,codeSizeAligned);
memcpy(outBuff, m_pOutputBuffer, codeSize);
outBuff += codeSizeAligned;
}
if(pMethod->m_dwNumExceptions)
{
// Validate the eh
COR_ILMETHOD_SECT_EH_CLAUSE_FAT* pEx;
DWORD TryEnd,HandlerEnd, dwEx, dwEf;
for(dwEx = 0, pEx = pMethod->m_ExceptionList; dwEx < pMethod->m_dwNumExceptions; dwEx++, pEx++)
{
if(pEx->GetTryOffset() > m_CurPC) // i.e., pMethod->m_CodeSize
{
report->error("Invalid SEH clause #%d: Try block starts beyond code size\n",dwEx+1);
}
TryEnd = pEx->GetTryOffset()+pEx->GetTryLength();
if(TryEnd > m_CurPC)
{
report->error("Invalid SEH clause #%d: Try block ends beyond code size\n",dwEx+1);
}
if(pEx->GetHandlerOffset() > m_CurPC)
{
report->error("Invalid SEH clause #%d: Handler block starts beyond code size\n",dwEx+1);
}
HandlerEnd = pEx->GetHandlerOffset()+pEx->GetHandlerLength();
if(HandlerEnd > m_CurPC)
{
report->error("Invalid SEH clause #%d: Handler block ends beyond code size\n",dwEx+1);
}
if(pEx->Flags & COR_ILEXCEPTION_CLAUSE_FILTER)
{
if(!((pEx->GetFilterOffset() >= TryEnd)||(pEx->GetTryOffset() >= HandlerEnd)))
{
report->error("Invalid SEH clause #%d: Try and Filter/Handler blocks overlap\n",dwEx+1);
}
for(dwEf = 0; dwEf < pMethod->m_dwNumEndfilters; dwEf++)
{
if(pMethod->m_EndfilterOffsetList[dwEf] == pEx->GetHandlerOffset()) break;
}
if(dwEf >= pMethod->m_dwNumEndfilters)
{
report->error("Invalid SEH clause #%d: Filter block separated from Handler, or not ending with endfilter\n",dwEx+1);
}
}
else
if(!((pEx->GetHandlerOffset() >= TryEnd)||(pEx->GetTryOffset() >= HandlerEnd)))
{
report->error("Invalid SEH clause #%d: Try and Handler blocks overlap\n",dwEx+1);
}
}
// Emit the eh
outBuff += COR_ILMETHOD_SECT_EH::Emit(ehSize, pMethod->m_dwNumExceptions,
pMethod->m_ExceptionList, false, outBuff);
}
_ASSERTE(outBuff == endbuf);
pMethod->m_pbsBody = pbsBody;
LocalMemberRefFixup* pMRF;
while((pMRF = pMethod->m_LocalMemberRefFixupList.POP()))
{
pMRF->offset += (size_t)(pMethod->m_pCode);
m_LocalMemberRefFixupList.PUSH(pMRF); // transfer MRF to assembler's list
}
if(m_fReportProgress)
{
if (pMethod->IsGlobalMethod())
report->msg("Assembled global method %s\n", pMethod->m_szName);
else report->msg("Assembled method %s::%s\n", pMethod->m_pClass->m_szFQN,
pMethod->m_szName);
}
return TRUE;
}
BOOL Assembler::EmitMethodBody(Method* pMethod, BinStr* pbsOut)
{
HRESULT hr = S_OK;
if(pMethod)
{
BinStr* pbsBody = pMethod->m_pbsBody;
unsigned totalSize;
if(pbsBody && (totalSize = pbsBody->length()))
{
unsigned headerSize = pMethod->m_methodOffset-pMethod->m_headerOffset;
MethodBody* pMB = NULL;
// ----------emit locals signature-------------------
unsigned uLocals;
if((uLocals = pMethod->m_Locals.COUNT()))
{
VarDescr* pVD;
BinStr* pbsSig = new BinStr();
unsigned cnt;
DWORD cSig;
const COR_SIGNATURE* mySig;
pbsSig->appendInt8(IMAGE_CEE_CS_CALLCONV_LOCAL_SIG);
cnt = CorSigCompressData(uLocals,pbsSig->getBuff(5));
pbsSig->remove(5-cnt);
for(cnt = 0; (pVD = pMethod->m_Locals.PEEK(cnt)); cnt++)
{
if(pVD->pbsSig) pbsSig->append(pVD->pbsSig);
else
{
report->error("Undefined type of local var slot %d in method %s\n",cnt,pMethod->m_szName);
pbsSig->appendInt8(ELEMENT_TYPE_I4);
}
}
cSig = pbsSig->length();
mySig = (const COR_SIGNATURE *)(pbsSig->ptr());
if (cSig > 1) // non-empty signature
{
hr = m_pEmitter->GetTokenFromSig(mySig, cSig, &pMethod->m_LocalsSig);
_ASSERTE(SUCCEEDED(hr));
}
delete pbsSig;
COR_ILMETHOD_FAT* pFH; // Fat header guaranteed, because there are local vars
pFH = (COR_ILMETHOD_FAT*)(pMethod->m_pbsBody->ptr());
pFH->SetLocalVarSigTok(pMethod->m_LocalsSig);
}
if(m_fFoldCode)
{
for(int k=0; (pMB = m_MethodBodyList.PEEK(k)) != NULL; k++)
{
if((pMB->pbsBody->length() == totalSize)
&& (memcmp(pMB->pbsBody->ptr(), pbsBody->ptr(),totalSize)==0))
break;
}
if(pMB)
{
pMethod->m_headerOffset= pMB->RVA;
pMethod->m_methodOffset= pMB->RVA + headerSize;
pMethod->m_pCode = pMB->pCode;
delete pbsBody;
pMethod->m_pbsBody = NULL;
m_dwMethodsFolded++;
}
}
if(pMB == NULL)
{
BYTE* outBuff;
unsigned align = (headerSize == 1)? 1 : 4;
ULONG PEFileOffset, methodRVA;
if (FAILED(m_pCeeFileGen->GetSectionBlock (m_pILSection, totalSize,
align, (void **) &outBuff))) return FALSE;
memcpy(outBuff,pbsBody->ptr(),totalSize);
                // The offset where we start (not where the alignment bytes start!)
if (FAILED(m_pCeeFileGen->GetSectionDataLen (m_pILSection, &PEFileOffset)))
return FALSE;
PEFileOffset -= totalSize;
pMethod->m_pCode = outBuff + headerSize;
pMethod->m_headerOffset= PEFileOffset;
pMethod->m_methodOffset= PEFileOffset + headerSize;
DoDeferredILFixups(pMethod);
m_pCeeFileGen->GetMethodRVA(m_pCeeFile, PEFileOffset,&methodRVA);
pMethod->m_headerOffset= methodRVA;
pMethod->m_methodOffset= methodRVA + headerSize;
if(m_fFoldCode)
{
if((pMB = new MethodBody)==NULL) return FALSE;
pMB->pbsBody = pbsBody;
pMB->RVA = methodRVA;
pMB->pCode = pMethod->m_pCode;
m_MethodBodyList.PUSH(pMB);
}
//else
// delete pbsBody;
//pMethod->m_pbsBody = NULL;
}
m_pEmitter->SetRVA(pMethod->m_Tok,pMethod->m_headerOffset);
}
if (m_fGeneratePDB)
{
if (FAILED(m_pPortablePdbWriter->DefineSequencePoints(pMethod)))
return FALSE;
if (FAILED(m_pPortablePdbWriter->DefineLocalScope(pMethod)))
return FALSE;
}
return TRUE;
}
else return FALSE;
}
ImportDescriptor* Assembler::EmitImport(BinStr* DllName)
{
int i = 0, l = 0;
ImportDescriptor* pID;
char* sz=NULL;
if(DllName) l = DllName->length(); // No zero terminator here!
if(l)
{
sz = (char*)DllName->ptr();
while((pID=m_ImportList.PEEK(i++)))
{
if((pID->dwDllName== (DWORD) l)&& !memcmp(pID->szDllName,sz,l)) return pID;
}
}
else
{
while((pID=m_ImportList.PEEK(i++)))
{
if(pID->dwDllName==0) return pID;
}
}
if((pID = new ImportDescriptor(sz,l)))
{
m_ImportList.PUSH(pID);
pID->mrDll = TokenFromRid(m_ImportList.COUNT(),mdtModuleRef);
return pID;
}
else report->error("Failed to allocate import descriptor\n");
return NULL;
}
void Assembler::EmitImports()
{
WCHAR* wzDllName=&wzUniBuf[0];
ImportDescriptor* pID;
int i;
mdToken tk;
for(i=0; (pID = m_ImportList.PEEK(i)); i++)
{
WszMultiByteToWideChar(g_uCodePage,0,pID->szDllName,-1,wzDllName,dwUniBuf-1);
if(FAILED(m_pEmitter->DefineModuleRef( // S_OK or error.
wzDllName, // [IN] DLL name
&tk))) // [OUT] returned
report->error("Failed to define module ref '%s'\n",pID->szDllName);
else
_ASSERTE(tk == pID->mrDll);
}
}
HRESULT Assembler::EmitPinvokeMap(mdToken tk, PInvokeDescriptor* pDescr)
{
WCHAR* wzAlias=&wzUniBuf[0];
if(pDescr->szAlias) WszMultiByteToWideChar(g_uCodePage,0,pDescr->szAlias,-1,wzAlias,dwUniBuf-1);
return m_pEmitter->DefinePinvokeMap( // Return code.
tk, // [IN] FieldDef, MethodDef or MethodImpl.
pDescr->dwAttrs, // [IN] Flags used for mapping.
(LPCWSTR)wzAlias, // [IN] Import name.
pDescr->mrDll); // [IN] ModuleRef token for the target DLL.
}
BOOL Assembler::EmitMethod(Method *pMethod)
{
// Emit the metadata for a method definition
BOOL fSuccess = FALSE;
WCHAR* wzMemberName=&wzUniBuf[0];
BOOL fIsInterface;
DWORD cSig;
ULONG methodRVA = 0;
mdMethodDef MethodToken;
mdTypeDef ClassToken = mdTypeDefNil;
char *pszMethodName;
COR_SIGNATURE *mySig;
_ASSERTE((m_pCeeFileGen != NULL) && (pMethod != NULL));
fIsInterface = ((pMethod->m_pClass != NULL) && IsTdInterface(pMethod->m_pClass->m_Attr));
pszMethodName = pMethod->m_szName;
mySig = pMethod->m_pMethodSig;
cSig = pMethod->m_dwMethodCSig;
// If this is an instance method, make certain the signature says so
if (!(pMethod->m_Attr & mdStatic))
*mySig |= IMAGE_CEE_CS_CALLCONV_HASTHIS;
ClassToken = (pMethod->IsGlobalMethod())? mdTokenNil
: pMethod->m_pClass->m_cl;
// Convert name to UNICODE
WszMultiByteToWideChar(g_uCodePage,0,pszMethodName,-1,wzMemberName,dwUniBuf-1);
if(IsMdPrivateScope(pMethod->m_Attr))
{
WCHAR* p = wcsstr(wzMemberName,W("$PST06"));
if(p) *p = 0;
}
if (FAILED(m_pEmitter->DefineMethod(ClassToken, // parent class
wzMemberName, // member name
pMethod->m_Attr & ~mdReservedMask, // member attributes
mySig, // member signature
cSig,
methodRVA, // RVA
pMethod->m_wImplAttr, // implflags
&MethodToken)))
{
report->error("Failed to define method '%s'\n",pszMethodName);
goto exit;
}
pMethod->m_Tok = MethodToken;
//--------------------------------------------------------------------------------
// the only way to set mdRequireSecObject:
if(pMethod->m_Attr & mdRequireSecObject)
{
mdToken tkPseudoClass;
if(FAILED(m_pEmitter->DefineTypeRefByName(1, COR_REQUIRES_SECOBJ_ATTRIBUTE, &tkPseudoClass)))
report->error("Unable to define type reference '%s'\n", COR_REQUIRES_SECOBJ_ATTRIBUTE_ANSI);
else
{
mdToken tkPseudoCtor;
BYTE bSig[3] = {IMAGE_CEE_CS_CALLCONV_HASTHIS,0,ELEMENT_TYPE_VOID};
if(FAILED(m_pEmitter->DefineMemberRef(tkPseudoClass, W(".ctor"), (PCCOR_SIGNATURE)bSig, 3, &tkPseudoCtor)))
report->error("Unable to define member reference '%s::.ctor'\n", COR_REQUIRES_SECOBJ_ATTRIBUTE_ANSI);
else DefineCV(new CustomDescr(MethodToken,tkPseudoCtor,NULL));
}
}
if (pMethod->m_NumTyPars)
{
ULONG i;
mdToken tkNil = mdTokenNil;
mdGenericParam tkGP = mdTokenNil;
for(i = 0; i < pMethod->m_NumTyPars; i++)
{
if (FAILED(m_pEmitter->DefineGenericParam(MethodToken, i, pMethod->m_TyPars[i].Attrs(), pMethod->m_TyPars[i].Name(), 0, &tkNil, &tkGP)))
{
report->error("Unable to define generic param: %s'\n", pMethod->m_TyPars[i].Name());
}
else
{
pMethod->m_TyPars[i].Token(tkGP);
EmitCustomAttributes(tkGP, pMethod->m_TyPars[i].CAList());
}
}
EmitGenericParamConstraints(pMethod->m_NumTyPars, pMethod->m_TyPars, pMethod->m_Tok, &(pMethod->m_GPCList));
}
//--------------------------------------------------------------------------------
EmitSecurityInfo(MethodToken,
pMethod->m_pPermissions,
pMethod->m_pPermissionSets);
//--------------------------------------------------------------------------------
if (pMethod->m_fEntryPoint)
{
if(fIsInterface) report->error("Entrypoint in Interface: Method '%s'\n",pszMethodName);
if (FAILED(m_pCeeFileGen->SetEntryPoint(m_pCeeFile, MethodToken)))
{
report->error("Failed to set entry point for method '%s'\n",pszMethodName);
goto exit;
}
}
//--------------------------------------------------------------------------------
if(IsMdPinvokeImpl(pMethod->m_Attr))
{
if(pMethod->m_pPInvoke)
{
HRESULT hr;
if(pMethod->m_pPInvoke->szAlias == NULL) pMethod->m_pPInvoke->szAlias = pszMethodName;
hr = EmitPinvokeMap(MethodToken,pMethod->m_pPInvoke);
if(pMethod->m_pPInvoke->szAlias == pszMethodName) pMethod->m_pPInvoke->szAlias = NULL;
if(FAILED(hr))
{
report->error("Failed to set PInvoke map for method '%s'\n",pszMethodName);
goto exit;
}
}
}
{ // add parameters to metadata
void const *pValue=NULL;
ULONG cbValue;
DWORD dwCPlusTypeFlag=0;
mdParamDef pdef;
WCHAR* wzParName=&wzUniBuf[0];
char* szPhonyName=(char*)&wzUniBuf[dwUniBuf >> 1];
if(pMethod->m_dwRetAttr || pMethod->m_pRetMarshal || pMethod->m_RetCustDList.COUNT())
{
if(pMethod->m_pRetValue)
{
dwCPlusTypeFlag= (DWORD)*(pMethod->m_pRetValue->ptr());
pValue = (void const *)(pMethod->m_pRetValue->ptr()+1);
cbValue = pMethod->m_pRetValue->length()-1;
if(dwCPlusTypeFlag == ELEMENT_TYPE_STRING)
{
cbValue /= sizeof(WCHAR);
#if BIGENDIAN
void* pValueTemp = _alloca(cbValue * sizeof(WCHAR));
memcpy(pValueTemp, pValue, cbValue * sizeof(WCHAR));
pValue = pValueTemp;
SwapStringLength((WCHAR*)pValue, cbValue);
#endif
}
}
else
{
pValue = NULL;
cbValue = (ULONG)-1;
dwCPlusTypeFlag=0;
}
m_pEmitter->DefineParam(MethodToken,0,NULL,pMethod->m_dwRetAttr,dwCPlusTypeFlag,pValue,cbValue,&pdef);
if(pMethod->m_pRetMarshal)
{
if(FAILED(m_pEmitter->SetFieldMarshal (
pdef, // [IN] given a fieldDef or paramDef token
(PCCOR_SIGNATURE)(pMethod->m_pRetMarshal->ptr()), // [IN] native type specification
pMethod->m_pRetMarshal->length()))) // [IN] count of bytes of pvNativeType
report->error("Failed to set param marshaling for return\n");
}
EmitCustomAttributes(pdef, &(pMethod->m_RetCustDList));
}
for(ARG_NAME_LIST *pAN=pMethod->m_firstArgName; pAN; pAN = pAN->pNext)
{
if(pAN->nNum >= 65535)
{
report->error("Method '%s': Param.sequence number (%d) exceeds 65535, unable to define parameter\n",pszMethodName,pAN->nNum+1);
continue;
}
if(pAN->dwName) strcpy_s(szPhonyName,dwUniBuf >> 1,pAN->szName);
else sprintf_s(szPhonyName,(dwUniBuf >> 1),"A_%d",pAN->nNum);
WszMultiByteToWideChar(g_uCodePage,0,szPhonyName,-1,wzParName,dwUniBuf >> 1);
if(pAN->pValue)
{
dwCPlusTypeFlag= (DWORD)*(pAN->pValue->ptr());
pValue = (void const *)(pAN->pValue->ptr()+1);
cbValue = pAN->pValue->length()-1;
if(dwCPlusTypeFlag == ELEMENT_TYPE_STRING)
{
cbValue /= sizeof(WCHAR);
#if BIGENDIAN
void* pValueTemp = _alloca(cbValue * sizeof(WCHAR));
memcpy(pValueTemp, pValue, cbValue * sizeof(WCHAR));
pValue = pValueTemp;
SwapStringLength((WCHAR*)pValue, cbValue);
#endif
}
}
else
{
pValue = NULL;
cbValue = (ULONG)-1;
dwCPlusTypeFlag=0;
}
m_pEmitter->DefineParam(MethodToken,pAN->nNum+1,wzParName,pAN->dwAttr,dwCPlusTypeFlag,pValue,cbValue,&pdef);
if(pAN->pMarshal)
{
if(FAILED(m_pEmitter->SetFieldMarshal (
pdef, // [IN] given a fieldDef or paramDef token
(PCCOR_SIGNATURE)(pAN->pMarshal->ptr()), // [IN] native type specification
pAN->pMarshal->length()))) // [IN] count of bytes of pvNativeType
report->error("Failed to set param marshaling for '%s'\n",pAN->szName);
}
EmitCustomAttributes(pdef, &(pAN->CustDList));
}
}
fSuccess = TRUE;
//--------------------------------------------------------------------------------
// Update method implementations for this method
{
MethodImplDescriptor* pMID;
int i;
for(i=0;(pMID = pMethod->m_MethodImplDList.PEEK(i));i++)
{
pMID->m_tkImplementingMethod = MethodToken;
// don't delete it here, it's still in the general list
}
}
//--------------------------------------------------------------------------------
EmitCustomAttributes(MethodToken, &(pMethod->m_CustomDescrList));
exit:
if (fSuccess == FALSE) m_State = STATE_FAIL;
return fSuccess;
}
BOOL Assembler::EmitMethodImpls()
{
MethodImplDescriptor* pMID;
BOOL ret = TRUE;
int i;
for(i=0; (pMID = m_MethodImplDList.PEEK(i)); i++)
{
pMID->m_tkImplementingMethod = ResolveLocalMemberRef(pMID->m_tkImplementingMethod);
pMID->m_tkImplementedMethod = ResolveLocalMemberRef(pMID->m_tkImplementedMethod);
if(FAILED(m_pEmitter->DefineMethodImpl( pMID->m_tkDefiningClass,
pMID->m_tkImplementingMethod,
pMID->m_tkImplementedMethod)))
{
report->error("Failed to define Method Implementation");
ret = FALSE;
}
pMID->m_fNew = FALSE;
}// end while
return ret;
}
mdToken Assembler::ResolveLocalMemberRef(mdToken tok)
{
    // 0x99000000 and 0x98000000 are ilasm's internal pseudo-token types for
    // local method refs and local field refs that still need resolution.
    if(TypeFromToken(tok) == 0x99000000)   // local method ref
    {
        tok = RidFromToken(tok);
        if(tok) tok = m_LocalMethodRefDList.PEEK(tok-1)->m_tkResolved;
    }
    else if(TypeFromToken(tok) == 0x98000000)  // local field ref
    {
        tok = RidFromToken(tok);
        if(tok) tok = m_LocalFieldRefDList.PEEK(tok-1)->m_tkResolved;
    }
return tok;
}
BOOL Assembler::EmitEvent(EventDescriptor* pED)
{
mdMethodDef mdAddOn=mdMethodDefNil,
mdRemoveOn=mdMethodDefNil,
mdFire=mdMethodDefNil,
*mdOthers;
int nOthers;
WCHAR* wzMemberName=&wzUniBuf[0];
if(!pED) return FALSE;
WszMultiByteToWideChar(g_uCodePage,0,pED->m_szName,-1,wzMemberName,dwUniBuf-1);
mdAddOn = ResolveLocalMemberRef(pED->m_tkAddOn);
if(TypeFromToken(mdAddOn) != mdtMethodDef)
{
report->error("Invalid Add method of event '%s'\n",pED->m_szName);
return FALSE;
}
mdRemoveOn = ResolveLocalMemberRef(pED->m_tkRemoveOn);
if(TypeFromToken(mdRemoveOn) != mdtMethodDef)
{
report->error("Invalid Remove method of event '%s'\n",pED->m_szName);
return FALSE;
}
mdFire = ResolveLocalMemberRef(pED->m_tkFire);
if((RidFromToken(mdFire)!=0)&&(TypeFromToken(mdFire) != mdtMethodDef))
{
report->error("Invalid Fire method of event '%s'\n",pED->m_szName);
return FALSE;
}
nOthers = pED->m_tklOthers.COUNT();
mdOthers = new mdMethodDef[nOthers+1];
if(mdOthers == NULL)
{
report->error("Failed to allocate Others array for event descriptor\n");
nOthers = 0;
}
for(int j=0; j < nOthers; j++)
{
mdOthers[j] = ResolveLocalMemberRef((mdToken)(UINT_PTR)(pED->m_tklOthers.PEEK(j))); // @WARNING: casting down from 'mdToken*' to 'mdToken'
}
mdOthers[nOthers] = mdMethodDefNil; // like null-terminator
if(FAILED(m_pEmitter->DefineEvent( pED->m_tdClass,
wzMemberName,
pED->m_dwAttr,
pED->m_tkEventType,
mdAddOn,
mdRemoveOn,
mdFire,
mdOthers,
&(pED->m_edEventTok))))
{
report->error("Failed to define event '%s'.\n",pED->m_szName);
delete [] mdOthers;
return FALSE;
}
EmitCustomAttributes(pED->m_edEventTok, &(pED->m_CustomDescrList));
return TRUE;
}
BOOL Assembler::EmitProp(PropDescriptor* pPD)
{
mdMethodDef mdSet, mdGet, *mdOthers;
int nOthers;
WCHAR* wzMemberName=&wzUniBuf[0];
if(!pPD) return FALSE;
WszMultiByteToWideChar(g_uCodePage,0,pPD->m_szName,-1,wzMemberName,dwUniBuf-1);
mdSet = ResolveLocalMemberRef(pPD->m_tkSet);
if((RidFromToken(mdSet)!=0)&&(TypeFromToken(mdSet) != mdtMethodDef))
{
report->error("Invalid Set method of property '%s'\n",pPD->m_szName);
return FALSE;
}
mdGet = ResolveLocalMemberRef(pPD->m_tkGet);
if((RidFromToken(mdGet)!=0)&&(TypeFromToken(mdGet) != mdtMethodDef))
{
report->error("Invalid Get method of property '%s'\n",pPD->m_szName);
return FALSE;
}
nOthers = pPD->m_tklOthers.COUNT();
mdOthers = new mdMethodDef[nOthers+1];
if(mdOthers == NULL)
{
report->error("Failed to allocate Others array for prop descriptor\n");
nOthers = 0;
}
for(int j=0; j < nOthers; j++)
{
mdOthers[j] = ResolveLocalMemberRef((mdToken)(UINT_PTR)(pPD->m_tklOthers.PEEK(j))); // @WARNING: casting down from 'mdToken*' to 'mdToken'
if((RidFromToken(mdOthers[j])!=0)&&(TypeFromToken(mdOthers[j]) != mdtMethodDef))
{
report->error("Invalid Other method of property '%s'\n",pPD->m_szName);
delete [] mdOthers;
return FALSE;
}
}
mdOthers[nOthers] = mdMethodDefNil; // like null-terminator
void* pValue = pPD->m_pValue;
#if BIGENDIAN
if (pPD->m_dwCPlusTypeFlag == ELEMENT_TYPE_STRING)
{
void* pValueTemp = _alloca(pPD->m_cbValue * sizeof(WCHAR));
memcpy(pValueTemp, pValue, pPD->m_cbValue * sizeof(WCHAR));
pValue = pValueTemp;
SwapStringLength((WCHAR*)pValue, pPD->m_cbValue);
}
#endif
if(FAILED(m_pEmitter->DefineProperty( pPD->m_tdClass,
wzMemberName,
pPD->m_dwAttr,
pPD->m_pSig,
pPD->m_dwCSig,
pPD->m_dwCPlusTypeFlag,
pValue,
pPD->m_cbValue,
mdSet,
mdGet,
mdOthers,
&(pPD->m_pdPropTok))))
{
report->error("Failed to define property '%s'.\n",pPD->m_szName);
delete [] mdOthers;
return FALSE;
}
EmitCustomAttributes(pPD->m_pdPropTok, &(pPD->m_CustomDescrList));
return TRUE;
}
Class *Assembler::FindCreateClass(_In_ __nullterminated const char *pszFQN)
{
Class *pSearch = NULL;
if(pszFQN)
{
dummyClass->m_szFQN = pszFQN;
dummyClass->m_Hash = hash((BYTE*)pszFQN, (unsigned)strlen(pszFQN), 10);
pSearch = m_hshClass.FIND(dummyClass);
dummyClass->m_szFQN = NULL;
dummyClass->m_Hash = 0;
if(!pSearch)
{
char* pch;
DWORD dwFQN = (DWORD)strlen(pszFQN);
Class *pEncloser = NULL;
char* pszNewFQN = new char[dwFQN+1];
strcpy_s(pszNewFQN,dwFQN+1,pszFQN);
if((pch = strrchr(pszNewFQN, NESTING_SEP)) != NULL)
{
*pch = 0;
pEncloser = FindCreateClass(pszNewFQN);
*pch = NESTING_SEP;
}
pSearch = new Class(pszNewFQN);
if (pSearch == NULL)
report->error("Failed to create class '%s'\n",pszNewFQN);
else
{
pSearch->m_pEncloser = pEncloser;
m_lstClass.PUSH(pSearch);
pSearch->m_cl = mdtTypeDef | m_lstClass.COUNT();
m_hshClass.PUSH(pSearch);
}
}
}
return pSearch;
}
BOOL Assembler::EmitClass(Class *pClass)
{
LPCUTF8 szFullName;
WCHAR* wzFullName=&wzUniBuf[0];
HRESULT hr = E_FAIL;
GUID guid;
size_t L;
mdToken tok;
if(pClass == NULL) return FALSE;
hr = CoCreateGuid(&guid);
if (FAILED(hr))
{
printf("Unable to create GUID\n");
m_State = STATE_FAIL;
return FALSE;
}
if(pClass->m_pEncloser)
szFullName = strrchr(pClass->m_szFQN,NESTING_SEP) + 1;
else
szFullName = pClass->m_szFQN;
WszMultiByteToWideChar(g_uCodePage,0,szFullName,-1,wzFullName,dwUniBuf);
L = wcslen(wzFullName);
if((L==0)||(wzFullName[L-1]==L'.')) // Missing class name!
{
wcscat_s(wzFullName,dwUniBuf,W("$UNNAMED_TYPE$"));
}
pClass->m_Attr = CheckClassFlagsIfNested(pClass->m_pEncloser, pClass->m_Attr);
if (pClass->m_pEncloser)
{
hr = m_pEmitter->DefineNestedType( wzFullName,
pClass->m_Attr, // attributes
pClass->m_crExtends, // CR extends class
pClass->m_crImplements,// implements
pClass->m_pEncloser->m_cl, // Enclosing class.
&tok);
}
else
{
hr = m_pEmitter->DefineTypeDef( wzFullName,
pClass->m_Attr, // attributes
pClass->m_crExtends, // CR extends class
pClass->m_crImplements,// implements
&tok);
}
_ASSERTE(tok == pClass->m_cl);
if (FAILED(hr)) goto exit;
if (pClass->m_NumTyPars)
{
ULONG i;
mdToken tkNil = mdTokenNil;
mdGenericParam tkGP = mdTokenNil;
for(i = 0; i < pClass->m_NumTyPars; i++)
{
if (FAILED(m_pEmitter->DefineGenericParam(pClass->m_cl, i, pClass->m_TyPars[i].Attrs(), pClass->m_TyPars[i].Name(), 0, &tkNil, &tkGP)))
{
report->error("Unable to define generic param: %s'\n", pClass->m_TyPars[i].Name());
}
else
{
pClass->m_TyPars[i].Token(tkGP);
EmitCustomAttributes(tkGP, pClass->m_TyPars[i].CAList());
}
}
EmitGenericParamConstraints(pClass->m_NumTyPars, pClass->m_TyPars, pClass->m_cl, &(pClass->m_GPCList));
}
EmitCustomAttributes(pClass->m_cl, &(pClass->m_CustDList));
hr = S_OK;
exit:
return SUCCEEDED(hr);
}
BOOL Assembler::DoGlobalFixups()
{
GlobalFixup *pSearch;
for (int i=0; (pSearch = m_lstGlobalFixup.PEEK(i)); i++)
{
GlobalLabel * pLabel = FindGlobalLabel(pSearch->m_szLabel);
if (pLabel == NULL)
{
report->error("Unable to find forward reference global label '%s'\n",
pSearch->m_szLabel);
m_State = STATE_FAIL;
return FALSE;
}
//BYTE * pReference = pSearch->m_pReference;
//DWORD GlobalOffset = pLabel->m_GlobalOffset;
//memcpy(pReference,&GlobalOffset,4);
SET_UNALIGNED_VAL32(pSearch->m_pReference,pLabel->m_GlobalOffset);
}
return TRUE;
}
state_t Assembler::AddGlobalLabel(_In_ __nullterminated char *pszName, HCEESECTION section)
{
if (FindGlobalLabel(pszName) != NULL)
{
report->error("Duplicate global label '%s'\n", pszName);
m_State = STATE_FAIL;
return m_State;
}
ULONG GlobalOffset;
HRESULT hr;
hr = m_pCeeFileGen->GetSectionDataLen(section, &GlobalOffset);
_ASSERTE(SUCCEEDED(hr));
GlobalLabel *pNew = new GlobalLabel(pszName, GlobalOffset, section);
if (pNew == 0)
{
report->error("Failed to allocate global label '%s'\n",pszName);
m_State = STATE_FAIL;
return m_State;
}
m_lstGlobalLabel.PUSH(pNew);
return m_State;
}
void Assembler::AddLabel(DWORD CurPC, _In_ __nullterminated char *pszName)
{
if (m_pCurMethod->FindLabel(pszName) != NULL)
{
report->error("Duplicate label: '%s'\n", pszName);
m_State = STATE_FAIL;
}
else
{
Label *pNew = new Label(pszName, CurPC);
if (pNew != NULL)
//m_pCurMethod->m_lstLabel.PUSH(pNew);
m_lstLabel.PUSH(pNew);
else
{
report->error("Failed to allocate label '%s'\n",pszName);
m_State = STATE_FAIL;
}
}
}
void Assembler::DoDeferredILFixups(Method* pMethod)
{ // Now that we know where in the file the code bytes will wind up,
// we can update the RVAs and offsets.
ILFixup *pSearch;
HRESULT hr;
GlobalFixup *Fix = NULL;
int i;
for (i=0;(pSearch = pMethod->m_lstILFixup.PEEK(i));i++)
{
switch(pSearch->m_Kind)
{
case ilGlobal:
Fix = pSearch->m_Fixup;
_ASSERTE(Fix != NULL);
Fix->m_pReference = pMethod->m_pCode+pSearch->m_OffsetInMethod;
break;
case ilToken:
hr = m_pCeeFileGen->AddSectionReloc(m_pILSection,
pSearch->m_OffsetInMethod+pMethod->m_methodOffset,
m_pILSection,
srRelocMapToken);
_ASSERTE(SUCCEEDED(hr));
break;
case ilRVA:
hr = m_pCeeFileGen->AddSectionReloc(m_pILSection,
pSearch->m_OffsetInMethod+pMethod->m_methodOffset,
m_pGlobalDataSection,
srRelocAbsolute);
_ASSERTE(SUCCEEDED(hr));
break;
default:
;
}
}
}
/**************************************************************************/
BOOL Assembler::DoFixups(Method* pMethod)
{
Fixup *pSearch;
for (int i=0; (pSearch = pMethod->m_lstFixup.PEEK(i)); i++)
{
Label * pLabel = pMethod->FindLabel(pSearch->m_szLabel);
long offset;
if (pLabel == NULL)
{
report->error("Unable to find forward reference label '%s' called from PC=%d\n",
pSearch->m_szLabel, pSearch->m_RelativeToPC);
//m_State = STATE_FAIL;
return FALSE;
}
offset = pLabel->m_PC - pSearch->m_RelativeToPC;
if (pSearch->m_FixupSize == 1)
{
if (offset > 127 || offset < -128)
{
report->error("Offset of forward reference label '%s' called from PC=%d is too large for 1 byte pcrel\n",
pLabel->m_szName, pSearch->m_RelativeToPC);
//m_State = STATE_FAIL;
return FALSE;
}
*pSearch->m_pBytes = (BYTE) offset;
}
else if (pSearch->m_FixupSize == 4)
{
SET_UNALIGNED_VAL32(pSearch->m_pBytes,offset);
}
}
return TRUE;
}
OPCODE Assembler::DecodeOpcode(const BYTE *pCode, DWORD *pdwLen)
{
OPCODE opcode;
*pdwLen = 1;
opcode = OPCODE(pCode[0]);
switch(opcode) {
case CEE_PREFIX1:
opcode = OPCODE(pCode[1] + 256);
if (opcode < 0 || opcode >= CEE_COUNT)
return CEE_COUNT;
*pdwLen = 2;
break;
case CEE_PREFIXREF:
case CEE_PREFIX2:
case CEE_PREFIX3:
case CEE_PREFIX4:
case CEE_PREFIX5:
case CEE_PREFIX6:
case CEE_PREFIX7:
return CEE_COUNT;
default:
break;
}
return opcode;
}
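//-----------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// how DecodeOpcode treats one-byte vs. prefixed two-byte encodings. The byte
// values assume the standard ECMA-335 encoding, where 0x2A is 'ret', 0xFE is
// the CEE_PREFIX1 byte and 0xFE 0x01 is 'ceq'; 'pAsm' is a hypothetical
// Assembler instance.
//
// BYTE oneByte[] = { 0x2A };
// BYTE twoByte[] = { 0xFE, 0x01 };
// DWORD dwLen;
// OPCODE op1 = pAsm->DecodeOpcode(oneByte, &dwLen); // dwLen == 1
// OPCODE op2 = pAsm->DecodeOpcode(twoByte, &dwLen); // dwLen == 2, opcode index is 0x01 + 256
//-----------------------------------------------------------------------------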
char* Assembler::ReflectionNotation(mdToken tk)
{
char *sz = (char*)&wzUniBuf[dwUniBuf>>1], *pc;
*sz=0;
switch(TypeFromToken(tk))
{
case mdtTypeDef:
{
Class *pClass = m_lstClass.PEEK(RidFromToken(tk)-1);
if(pClass)
{
strcpy_s(sz,dwUniBuf>>1,pClass->m_szFQN);
pc = sz;
while((pc = strchr(pc,NESTING_SEP)) != NULL)
{
*pc = '+';
pc++;
}
}
}
break;
case mdtTypeRef:
{
ULONG N;
mdToken tkResScope;
if(SUCCEEDED(m_pImporter->GetTypeRefProps(tk,&tkResScope,wzUniBuf,dwUniBuf>>1,&N)))
{
WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,sz,dwUniBuf>>1,NULL,NULL);
if(TypeFromToken(tkResScope)==mdtAssemblyRef)
{
AsmManAssembly *pAsmRef = m_pManifest->m_AsmRefLst.PEEK(RidFromToken(tkResScope)-1);
if(pAsmRef)
{
pc = &sz[strlen(sz)];
pc+=sprintf_s(pc,(dwUniBuf >> 1),", %s, Version=%d.%d.%d.%d, Culture=",pAsmRef->szName,
pAsmRef->usVerMajor,pAsmRef->usVerMinor,pAsmRef->usBuild,pAsmRef->usRevision);
ULONG L=0;
if(pAsmRef->pLocale && (L=pAsmRef->pLocale->length()))
{
memcpy(wzUniBuf,pAsmRef->pLocale->ptr(),L);
wzUniBuf[L>>1] = 0;
WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,pc,dwUniBuf>>1,NULL,NULL);
}
else pc+=sprintf_s(pc,(dwUniBuf >> 1),"neutral");
pc = &sz[strlen(sz)];
if(pAsmRef->pPublicKeyToken && (L=pAsmRef->pPublicKeyToken->length()))
{
pc+=sprintf_s(pc,(dwUniBuf >> 1),", Publickeytoken=");
BYTE* pb = (BYTE*)(pAsmRef->pPublicKeyToken->ptr());
for(N=0; N<L; N++,pb++) pc+=sprintf_s(pc,(dwUniBuf >> 1),"%2.2x",*pb);
}
}
}
}
}
break;
default:
break;
}
return sz;
}
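//-----------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// the shape of the strings ReflectionNotation produces. The concrete names,
// versions and token bytes below are hypothetical.
//
// For a TypeDef of a nested class, nesting separators become '+':
//   "MyNamespace.Outer+Inner"
// For a TypeRef whose resolution scope is an assembly ref, the assembly
// identity is appended:
//   "MyNamespace.SomeType, SomeAssembly, Version=1.2.3.4, Culture=neutral, Publickeytoken=0123456789abcdef"
//-----------------------------------------------------------------------------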
/*
--------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.
For every delta with one or two bits set, and the deltas of all three
high bits or all three low bits, whether the original value of a,b,c
is almost all zero or is uniformly distributed,
* If mix() is run forward or backward, at least 32 bits in a,b,c
have at least 1/4 probability of changing.
* If mix() is run forward, every bit of c will change between 1/3 and
2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
mix() was built out of 36 single-cycle latency instructions in a
structure that could support 2x parallelism, like so:
a -= b;
a -= c; x = (c>>13);
b -= c; a ^= x;
b -= a; x = (a<<8);
c -= a; b ^= x;
c -= b; x = (b>>13);
...
Unfortunately, superscalar Pentiums and Sparcs can't take advantage
of that parallelism. They've also turned some of those single-cycle
latency instructions into multi-cycle latency instructions. Still,
this is the fastest good hash I could find. There were about 2^^68
to choose from. I only looked at a billion or so.
--------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
a -= b; a -= c; a ^= (c >> 13); \
b -= c; b -= a; b ^= (a << 8); \
c -= a; c -= b; c ^= (b >> 13); \
a -= b; a -= c; a ^= (c >> 12); \
b -= c; b -= a; b ^= (a << 16); \
c -= a; c -= b; c ^= (b >> 5); \
a -= b; a -= c; a ^= (c >> 3); \
b -= c; b -= a; b ^= (a << 10); \
c -= a; c -= b; c ^= (b >> 15); \
}
/*
--------------------------------------------------------------------
hash() -- hash a variable-length key into a 32-bit value
k : the key (the unaligned variable-length array of bytes)
len : the length of the key, counting by bytes
initval : can be any 4-byte value
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Every 1-bit and 2-bit delta achieves avalanche.
About 6*len+35 instructions.
The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is sooo slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
If you are hashing n strings (ub1 **)k, do it like this:
for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);
By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. You may use this
code any way you wish, private, educational, or commercial. It's free.
See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable. Do NOT use for cryptographic purposes.
--------------------------------------------------------------------
*/
unsigned hash(
_In_reads_(length) const BYTE *k, /* the key */
unsigned length, /* the length of the key */
unsigned initval) /* the previous hash, or an arbitrary value */
{
register unsigned a,b,c,len;
/* Set up the internal state */
len = length;
a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
c = initval; /* the previous hash value */
/*---------------------------------------- handle most of the key */
while (len >= 12)
{
a += (k[0] + ((unsigned)k[1] << 8) + ((unsigned)k[2] << 16) + ((unsigned)k[3] << 24));
b += (k[4] + ((unsigned)k[5] << 8) + ((unsigned)k[6] << 16) + ((unsigned)k[7] << 24));
c += (k[8] + ((unsigned)k[9] << 8) + ((unsigned)k[10] << 16) + ((unsigned)k[11] << 24));
mix(a,b,c);
k += 12; len -= 12;
}
/*------------------------------------- handle the last 11 bytes */
c += length;
switch(len) /* all the case statements fall through */
{
case 11: c+=((unsigned)k[10] << 24);
FALLTHROUGH;
case 10: c+=((unsigned)k[9] << 16);
FALLTHROUGH;
case 9 : c+=((unsigned)k[8] << 8);
FALLTHROUGH;
/* the first byte of c is reserved for the length */
case 8 : b+=((unsigned)k[7] << 24);
FALLTHROUGH;
case 7 : b+=((unsigned)k[6] << 16);
FALLTHROUGH;
case 6 : b+=((unsigned)k[5] << 8);
FALLTHROUGH;
case 5 : b+=k[4];
FALLTHROUGH;
case 4 : a+=((unsigned)k[3] << 24);
FALLTHROUGH;
case 3 : a+=((unsigned)k[2] << 16);
FALLTHROUGH;
case 2 : a+=((unsigned)k[1] << 8);
FALLTHROUGH;
case 1 : a+=k[0];
/* case 0: nothing left to add */
}
mix(a,b,c);
/*-------------------------------------------- report the result */
return c;
}
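//-----------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original source):
// how this assembler uses hash() for class lookup (see FindCreateClass above).
// The literal class name is hypothetical; 10 is the same seed the lookup uses.
//
// const char* pszFQN = "MyNamespace.MyClass";
// unsigned h = hash((BYTE*)pszFQN, (unsigned)strlen(pszFQN), 10);
// // 'h' is stored in a dummy Class object and used by m_hshClass.FIND
// // to locate the matching class record.
//-----------------------------------------------------------------------------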
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// File: assem.cpp
//
//
// COM+ IL assembler
//
#include "ilasmpch.h"
#define INITGUID
#define DECLARE_DATA
#include "assembler.h"
void indexKeywords(Indx* indx); // defined in asmparse.y
unsigned int g_uCodePage = CP_ACP;
unsigned int g_uConsoleCP = CP_ACP;
char g_szSourceFileName[MAX_FILENAME_LENGTH*3];
WCHAR wzUniBuf[dwUniBuf]; // Unicode conversion global buffer
Assembler::Assembler()
{
m_pDisp = NULL;
m_pEmitter = NULL;
m_pImporter = NULL;
char* pszFQN = new char[16];
strcpy_s(pszFQN,16,"<Module>");
m_pModuleClass = new Class(pszFQN);
m_lstClass.PUSH(m_pModuleClass);
m_hshClass.PUSH(m_pModuleClass);
m_pModuleClass->m_cl = mdTokenNil;
m_pModuleClass->m_bIsMaster = FALSE;
m_fStdMapping = FALSE;
m_fDisplayTraceOutput= FALSE;
m_fTolerateDupMethods = FALSE;
m_pCurOutputPos = NULL;
m_CurPC = 0; // PC offset in method
m_pCurMethod = NULL;
m_pCurClass = NULL;
m_pCurEvent = NULL;
m_pCurProp = NULL;
m_wzMetadataVersion = NULL;
m_wMSVmajor = 0xFFFF;
m_wMSVminor = 0xFFFF;
m_wSSVersionMajor = 4;
m_wSSVersionMinor = 0;
m_fAppContainer = FALSE;
m_fHighEntropyVA = FALSE;
m_pCeeFileGen = NULL;
m_pCeeFile = 0;
m_pManifest = NULL;
m_pCustomDescrList = NULL;
m_pGlobalDataSection = NULL;
m_pILSection = NULL;
m_pTLSSection = NULL;
m_fDidCoInitialise = FALSE;
m_fDLL = FALSE;
m_fEntryPointPresent = FALSE;
m_fHaveFieldsWithRvas = FALSE;
m_fFoldCode = FALSE;
m_dwMethodsFolded = 0;
m_szScopeName[0] = 0;
m_crExtends = mdTypeDefNil;
m_nImplList = 0;
m_TyParList = NULL;
m_SEHD = NULL;
m_firstArgName = NULL;
m_lastArgName = NULL;
m_szNamespace = new char[2];
m_szNamespace[0] = 0;
m_NSstack.PUSH(m_szNamespace);
m_szFullNS = new char[MAX_NAMESPACE_LENGTH];
memset(m_szFullNS,0,MAX_NAMESPACE_LENGTH);
m_ulFullNSLen = MAX_NAMESPACE_LENGTH;
m_State = STATE_OK;
m_fInitialisedMetaData = FALSE;
m_fAutoInheritFromObject = TRUE;
m_ulLastDebugLine = 0xFFFFFFFF;
m_ulLastDebugColumn = 0xFFFFFFFF;
m_ulLastDebugLineEnd = 0xFFFFFFFF;
m_ulLastDebugColumnEnd = 0xFFFFFFFF;
m_dwIncludeDebugInfo = 0;
m_fGeneratePDB = FALSE;
m_fIsMscorlib = FALSE;
m_fOptimize = FALSE;
m_tkSysObject = 0;
m_tkSysString = 0;
m_tkSysValue = 0;
m_tkSysEnum = 0;
m_pVTable = NULL;
m_pMarshal = NULL;
m_pPInvoke = NULL;
m_fReportProgress = TRUE;
m_tkCurrentCVOwner = 1; // module
m_pOutputBuffer = NULL;
m_dwSubsystem = (DWORD)-1;
m_dwComImageFlags = COMIMAGE_FLAGS_ILONLY;
m_dwFileAlignment = 0;
m_stBaseAddress = 0;
m_stSizeOfStackReserve = 0;
m_dwCeeFileFlags = ICEE_CREATE_FILE_PURE_IL;
g_szSourceFileName[0] = 0;
m_guidLang = CorSym_LanguageType_ILAssembly;
m_guidLangVendor = CorSym_LanguageVendor_Microsoft;
m_guidDoc = CorSym_DocumentType_Text;
for(int i=0; i<INSTR_POOL_SIZE; i++) m_Instr[i].opcode = -1;
m_wzResourceFile = NULL;
m_wzKeySourceName = NULL;
OnErrGo = false;
bClock = NULL;
m_pbsMD = NULL;
m_pOutputBuffer = new BYTE[OUTPUT_BUFFER_SIZE];
m_pCurOutputPos = m_pOutputBuffer;
m_pEndOutputPos = m_pOutputBuffer + OUTPUT_BUFFER_SIZE;
m_crImplList = new mdTypeRef[MAX_INTERFACES_IMPLEMENTED];
m_nImplListSize = MAX_INTERFACES_IMPLEMENTED;
m_pManifest = new AsmMan((void*)this);
dummyClass = new Class(NULL);
indexKeywords(&indxKeywords);
m_pPortablePdbWriter = NULL;
}
Assembler::~Assembler()
{
if(m_pbsMD) delete m_pbsMD;
if(m_pMarshal) delete m_pMarshal;
if(m_pManifest) delete m_pManifest;
if(m_pPInvoke) delete m_pPInvoke;
if(m_pVTable) delete m_pVTable;
m_lstGlobalLabel.RESET(true);
m_lstGlobalFixup.RESET(true);
m_hshClass.RESET(false);
m_lstClass.RESET(true);
while((m_ClassStack.POP()));
while(m_CustomDescrListStack.POP());
m_pCurClass = NULL;
dummyClass->m_szFQN = NULL;
delete dummyClass;
if (m_pOutputBuffer) delete [] m_pOutputBuffer;
if (m_crImplList) delete [] m_crImplList;
if (m_TyParList) delete m_TyParList;
if (m_pCeeFileGen != NULL) {
if (m_pCeeFile)
m_pCeeFileGen->DestroyCeeFile(&m_pCeeFile);
DestroyICeeFileGen(&m_pCeeFileGen);
m_pCeeFileGen = NULL;
}
while((m_szNamespace = m_NSstack.POP())) ;
delete [] m_szFullNS;
m_MethodBodyList.RESET(true);
m_TypeDefDList.RESET(true);
if (m_pImporter != NULL)
{
m_pImporter->Release();
m_pImporter = NULL;
}
if (m_pEmitter != NULL)
{
m_pEmitter->Release();
m_pEmitter = NULL;
}
if (m_pPortablePdbWriter != NULL)
{
delete m_pPortablePdbWriter;
m_pPortablePdbWriter = NULL;
}
if (m_pDisp != NULL)
{
m_pDisp->Release();
m_pDisp = NULL;
}
}
BOOL Assembler::Init(BOOL generatePdb)
{
if (m_pCeeFileGen != NULL) {
if (m_pCeeFile)
m_pCeeFileGen->DestroyCeeFile(&m_pCeeFile);
DestroyICeeFileGen(&m_pCeeFileGen);
m_pCeeFileGen = NULL;
}
if (FAILED(CreateICeeFileGen(&m_pCeeFileGen))) return FALSE;
if (FAILED(m_pCeeFileGen->CreateCeeFileEx(&m_pCeeFile,(ULONG)m_dwCeeFileFlags))) return FALSE;
if (FAILED(m_pCeeFileGen->GetSectionCreate(m_pCeeFile, ".il", sdReadOnly, &m_pILSection))) return FALSE;
if (FAILED(m_pCeeFileGen->GetSectionCreate (m_pCeeFile, ".sdata", sdReadWrite, &m_pGlobalDataSection))) return FALSE;
if (FAILED(m_pCeeFileGen->GetSectionCreate (m_pCeeFile, ".tls", sdReadWrite, &m_pTLSSection))) return FALSE;
m_fGeneratePDB = generatePdb;
return TRUE;
}
void Assembler::SetDLL(BOOL IsDll)
{
HRESULT OK;
OK = m_pCeeFileGen->SetDllSwitch(m_pCeeFile, IsDll);
_ASSERTE(SUCCEEDED(OK));
m_fDLL = IsDll;
}
void Assembler::ResetArgNameList()
{
if(m_firstArgName) delArgNameList(m_firstArgName);
m_firstArgName = NULL;
m_lastArgName = NULL;
}
void Assembler::ResetForNextMethod()
{
ResetArgNameList();
m_CurPC = 0;
m_pCurOutputPos = m_pOutputBuffer;
m_State = STATE_OK;
m_pCurMethod = NULL;
}
void Assembler::ResetLineNumbers()
{
// reset line number information
m_ulLastDebugLine = 0xFFFFFFFF;
m_ulLastDebugColumn = 0xFFFFFFFF;
m_ulLastDebugLineEnd = 0xFFFFFFFF;
m_ulLastDebugColumnEnd = 0xFFFFFFFF;
}
BOOL Assembler::AddMethod(Method *pMethod)
{
BOOL fIsInterface=FALSE, fIsImport=FALSE;
ULONG PEFileOffset=0;
_ASSERTE(m_pCeeFileGen != NULL);
if (pMethod == NULL)
{
report->error("pMethod == NULL");
return FALSE;
}
if(pMethod->m_pClass != NULL)
{
fIsInterface = IsTdInterface(pMethod->m_pClass->m_Attr);
fIsImport = IsTdImport(pMethod->m_pClass->m_Attr);
}
if(m_CurPC)
{
char sz[1024];
sz[0] = 0;
if(fIsImport) strcat_s(sz,1024," imported");
if(IsMdAbstract(pMethod->m_Attr)) strcat_s(sz,1024," abstract");
if(IsMdPinvokeImpl(pMethod->m_Attr)) strcat_s(sz,1024," pinvoke");
if(!IsMiIL(pMethod->m_wImplAttr)) strcat_s(sz,1024," non-IL");
if(IsMiRuntime(pMethod->m_wImplAttr)) strcat_s(sz,1024," runtime-supplied");
if(IsMiInternalCall(pMethod->m_wImplAttr)) strcat_s(sz,1024," an internal call");
if(strlen(sz))
{
report->error("Method cannot have body if it is%s\n",sz);
}
}
else // method has no body
{
if(fIsImport || IsMdAbstract(pMethod->m_Attr) || IsMdPinvokeImpl(pMethod->m_Attr)
|| IsMiRuntime(pMethod->m_wImplAttr) || IsMiInternalCall(pMethod->m_wImplAttr)) return TRUE;
if(OnErrGo)
{
report->error("Method has no body\n");
return TRUE;
}
else
{
report->warn("Method has no body, 'ret' emitted\n");
Instr* pIns = GetInstr();
if(pIns)
{
memset(pIns,0,sizeof(Instr));
pIns->opcode = CEE_RET;
EmitOpcode(pIns);
}
}
}
if(pMethod->m_Locals.COUNT()) pMethod->m_LocalsSig=0x11000001; // placeholder, the real token 2b defined in EmitMethod
COR_ILMETHOD_FAT fatHeader;
fatHeader.SetFlags(pMethod->m_Flags);
fatHeader.SetMaxStack(pMethod->m_MaxStack);
fatHeader.SetLocalVarSigTok(pMethod->m_LocalsSig);
fatHeader.SetCodeSize(m_CurPC);
bool moreSections = (pMethod->m_dwNumExceptions != 0);
// if max stack is specified <8, force fat header, otherwise (with tiny header) it will default to 8
if((fatHeader.GetMaxStack() < 8)&&(fatHeader.GetLocalVarSigTok()==0)&&(fatHeader.GetCodeSize()<64)&&(!moreSections))
fatHeader.SetFlags(fatHeader.GetFlags() | CorILMethod_InitLocals); //forces fat header but does nothing else, since LocalVarSigTok==0
unsigned codeSize = m_CurPC;
unsigned codeSizeAligned = codeSize;
if (moreSections)
codeSizeAligned = (codeSizeAligned + 3) & ~3; // to insure EH section aligned
unsigned headerSize = COR_ILMETHOD::Size(&fatHeader, moreSections);
unsigned ehSize = COR_ILMETHOD_SECT_EH::Size(pMethod->m_dwNumExceptions, pMethod->m_ExceptionList);
unsigned totalSize = headerSize + codeSizeAligned + ehSize;
BYTE* outBuff;
BYTE* endbuf;
BinStr* pbsBody;
if((pbsBody = new BinStr())==NULL) return FALSE;
if((outBuff = pbsBody->getBuff(totalSize))==NULL) return FALSE;
endbuf = &outBuff[totalSize];
// Emit the header
outBuff += COR_ILMETHOD::Emit(headerSize, &fatHeader, moreSections, outBuff);
pMethod->m_pCode = outBuff;
pMethod->m_headerOffset= PEFileOffset;
pMethod->m_methodOffset= PEFileOffset + headerSize;
pMethod->m_CodeSize = codeSize;
// Emit the code
if (codeSizeAligned)
{
memset(outBuff,0,codeSizeAligned);
memcpy(outBuff, m_pOutputBuffer, codeSize);
outBuff += codeSizeAligned;
}
if(pMethod->m_dwNumExceptions)
{
// Validate the eh
COR_ILMETHOD_SECT_EH_CLAUSE_FAT* pEx;
DWORD TryEnd,HandlerEnd, dwEx, dwEf;
for(dwEx = 0, pEx = pMethod->m_ExceptionList; dwEx < pMethod->m_dwNumExceptions; dwEx++, pEx++)
{
if(pEx->GetTryOffset() > m_CurPC) // i.e., pMethod->m_CodeSize
{
report->error("Invalid SEH clause #%d: Try block starts beyond code size\n",dwEx+1);
}
TryEnd = pEx->GetTryOffset()+pEx->GetTryLength();
if(TryEnd > m_CurPC)
{
report->error("Invalid SEH clause #%d: Try block ends beyond code size\n",dwEx+1);
}
if(pEx->GetHandlerOffset() > m_CurPC)
{
report->error("Invalid SEH clause #%d: Handler block starts beyond code size\n",dwEx+1);
}
HandlerEnd = pEx->GetHandlerOffset()+pEx->GetHandlerLength();
if(HandlerEnd > m_CurPC)
{
report->error("Invalid SEH clause #%d: Handler block ends beyond code size\n",dwEx+1);
}
if(pEx->Flags & COR_ILEXCEPTION_CLAUSE_FILTER)
{
if(!((pEx->GetFilterOffset() >= TryEnd)||(pEx->GetTryOffset() >= HandlerEnd)))
{
report->error("Invalid SEH clause #%d: Try and Filter/Handler blocks overlap\n",dwEx+1);
}
for(dwEf = 0; dwEf < pMethod->m_dwNumEndfilters; dwEf++)
{
if(pMethod->m_EndfilterOffsetList[dwEf] == pEx->GetHandlerOffset()) break;
}
if(dwEf >= pMethod->m_dwNumEndfilters)
{
report->error("Invalid SEH clause #%d: Filter block separated from Handler, or not ending with endfilter\n",dwEx+1);
}
}
else
if(!((pEx->GetHandlerOffset() >= TryEnd)||(pEx->GetTryOffset() >= HandlerEnd)))
{
report->error("Invalid SEH clause #%d: Try and Handler blocks overlap\n",dwEx+1);
}
}
// Emit the eh
outBuff += COR_ILMETHOD_SECT_EH::Emit(ehSize, pMethod->m_dwNumExceptions,
pMethod->m_ExceptionList, false, outBuff);
}
_ASSERTE(outBuff == endbuf);
pMethod->m_pbsBody = pbsBody;
LocalMemberRefFixup* pMRF;
while((pMRF = pMethod->m_LocalMemberRefFixupList.POP()))
{
pMRF->offset += (size_t)(pMethod->m_pCode);
m_LocalMemberRefFixupList.PUSH(pMRF); // transfer MRF to assembler's list
}
if(m_fReportProgress)
{
if (pMethod->IsGlobalMethod())
report->msg("Assembled global method %s\n", pMethod->m_szName);
else report->msg("Assembled method %s::%s\n", pMethod->m_pClass->m_szFQN,
pMethod->m_szName);
}
return TRUE;
}
BOOL Assembler::EmitMethodBody(Method* pMethod, BinStr* pbsOut)
{
HRESULT hr = S_OK;
if(pMethod)
{
BinStr* pbsBody = pMethod->m_pbsBody;
unsigned totalSize;
if(pbsBody && (totalSize = pbsBody->length()))
{
unsigned headerSize = pMethod->m_methodOffset-pMethod->m_headerOffset;
MethodBody* pMB = NULL;
// ----------emit locals signature-------------------
unsigned uLocals;
if((uLocals = pMethod->m_Locals.COUNT()))
{
VarDescr* pVD;
BinStr* pbsSig = new BinStr();
unsigned cnt;
DWORD cSig;
const COR_SIGNATURE* mySig;
pbsSig->appendInt8(IMAGE_CEE_CS_CALLCONV_LOCAL_SIG);
cnt = CorSigCompressData(uLocals,pbsSig->getBuff(5));
pbsSig->remove(5-cnt);
for(cnt = 0; (pVD = pMethod->m_Locals.PEEK(cnt)); cnt++)
{
if(pVD->pbsSig) pbsSig->append(pVD->pbsSig);
else
{
report->error("Undefined type of local var slot %d in method %s\n",cnt,pMethod->m_szName);
pbsSig->appendInt8(ELEMENT_TYPE_I4);
}
}
cSig = pbsSig->length();
mySig = (const COR_SIGNATURE *)(pbsSig->ptr());
if (cSig > 1) // non-empty signature
{
hr = m_pEmitter->GetTokenFromSig(mySig, cSig, &pMethod->m_LocalsSig);
_ASSERTE(SUCCEEDED(hr));
}
delete pbsSig;
COR_ILMETHOD_FAT* pFH; // Fat header guaranteed, because there are local vars
pFH = (COR_ILMETHOD_FAT*)(pMethod->m_pbsBody->ptr());
pFH->SetLocalVarSigTok(pMethod->m_LocalsSig);
}
if(m_fFoldCode)
{
for(int k=0; (pMB = m_MethodBodyList.PEEK(k)) != NULL; k++)
{
if((pMB->pbsBody->length() == totalSize)
&& (memcmp(pMB->pbsBody->ptr(), pbsBody->ptr(),totalSize)==0))
break;
}
if(pMB)
{
pMethod->m_headerOffset= pMB->RVA;
pMethod->m_methodOffset= pMB->RVA + headerSize;
pMethod->m_pCode = pMB->pCode;
delete pbsBody;
pMethod->m_pbsBody = NULL;
m_dwMethodsFolded++;
}
}
if(pMB == NULL)
{
BYTE* outBuff;
unsigned align = (headerSize == 1)? 1 : 4;
ULONG PEFileOffset, methodRVA;
if (FAILED(m_pCeeFileGen->GetSectionBlock (m_pILSection, totalSize,
align, (void **) &outBuff))) return FALSE;
memcpy(outBuff,pbsBody->ptr(),totalSize);
// The offset where we start (not where the alignment bytes start!)
if (FAILED(m_pCeeFileGen->GetSectionDataLen (m_pILSection, &PEFileOffset)))
return FALSE;
PEFileOffset -= totalSize;
pMethod->m_pCode = outBuff + headerSize;
pMethod->m_headerOffset= PEFileOffset;
pMethod->m_methodOffset= PEFileOffset + headerSize;
DoDeferredILFixups(pMethod);
m_pCeeFileGen->GetMethodRVA(m_pCeeFile, PEFileOffset,&methodRVA);
pMethod->m_headerOffset= methodRVA;
pMethod->m_methodOffset= methodRVA + headerSize;
if(m_fFoldCode)
{
if((pMB = new MethodBody)==NULL) return FALSE;
pMB->pbsBody = pbsBody;
pMB->RVA = methodRVA;
pMB->pCode = pMethod->m_pCode;
m_MethodBodyList.PUSH(pMB);
}
//else
// delete pbsBody;
//pMethod->m_pbsBody = NULL;
}
m_pEmitter->SetRVA(pMethod->m_Tok,pMethod->m_headerOffset);
}
if (m_fGeneratePDB)
{
if (FAILED(m_pPortablePdbWriter->DefineSequencePoints(pMethod)))
return FALSE;
if (FAILED(m_pPortablePdbWriter->DefineLocalScope(pMethod)))
return FALSE;
}
return TRUE;
}
else return FALSE;
}
ImportDescriptor* Assembler::EmitImport(BinStr* DllName)
{
int i = 0, l = 0;
ImportDescriptor* pID;
char* sz=NULL;
if(DllName) l = DllName->length(); // No zero terminator here!
if(l)
{
sz = (char*)DllName->ptr();
while((pID=m_ImportList.PEEK(i++)))
{
if((pID->dwDllName== (DWORD) l)&& !memcmp(pID->szDllName,sz,l)) return pID;
}
}
else
{
while((pID=m_ImportList.PEEK(i++)))
{
if(pID->dwDllName==0) return pID;
}
}
if((pID = new ImportDescriptor(sz,l)))
{
m_ImportList.PUSH(pID);
pID->mrDll = TokenFromRid(m_ImportList.COUNT(),mdtModuleRef);
return pID;
}
else report->error("Failed to allocate import descriptor\n");
return NULL;
}
void Assembler::EmitImports()
{
WCHAR* wzDllName=&wzUniBuf[0];
ImportDescriptor* pID;
int i;
mdToken tk;
for(i=0; (pID = m_ImportList.PEEK(i)); i++)
{
WszMultiByteToWideChar(g_uCodePage,0,pID->szDllName,-1,wzDllName,dwUniBuf-1);
if(FAILED(m_pEmitter->DefineModuleRef( // S_OK or error.
wzDllName, // [IN] DLL name
&tk))) // [OUT] returned
report->error("Failed to define module ref '%s'\n",pID->szDllName);
else
_ASSERTE(tk == pID->mrDll);
}
}
HRESULT Assembler::EmitPinvokeMap(mdToken tk, PInvokeDescriptor* pDescr)
{
WCHAR* wzAlias=&wzUniBuf[0];
if(pDescr->szAlias) WszMultiByteToWideChar(g_uCodePage,0,pDescr->szAlias,-1,wzAlias,dwUniBuf-1);
return m_pEmitter->DefinePinvokeMap( // Return code.
tk, // [IN] FieldDef, MethodDef or MethodImpl.
pDescr->dwAttrs, // [IN] Flags used for mapping.
(LPCWSTR)wzAlias, // [IN] Import name.
pDescr->mrDll); // [IN] ModuleRef token for the target DLL.
}
BOOL Assembler::EmitMethod(Method *pMethod)
{
// Emit the metadata for a method definition
BOOL fSuccess = FALSE;
WCHAR* wzMemberName=&wzUniBuf[0];
BOOL fIsInterface;
DWORD cSig;
ULONG methodRVA = 0;
mdMethodDef MethodToken;
mdTypeDef ClassToken = mdTypeDefNil;
char *pszMethodName;
COR_SIGNATURE *mySig;
_ASSERTE((m_pCeeFileGen != NULL) && (pMethod != NULL));
fIsInterface = ((pMethod->m_pClass != NULL) && IsTdInterface(pMethod->m_pClass->m_Attr));
pszMethodName = pMethod->m_szName;
mySig = pMethod->m_pMethodSig;
cSig = pMethod->m_dwMethodCSig;
// If this is an instance method, make certain the signature says so
if (!(pMethod->m_Attr & mdStatic))
*mySig |= IMAGE_CEE_CS_CALLCONV_HASTHIS;
ClassToken = (pMethod->IsGlobalMethod())? mdTokenNil
: pMethod->m_pClass->m_cl;
// Convert name to UNICODE
WszMultiByteToWideChar(g_uCodePage,0,pszMethodName,-1,wzMemberName,dwUniBuf-1);
if(IsMdPrivateScope(pMethod->m_Attr))
{
WCHAR* p = wcsstr(wzMemberName,W("$PST06"));
if(p) *p = 0;
}
if (FAILED(m_pEmitter->DefineMethod(ClassToken, // parent class
wzMemberName, // member name
pMethod->m_Attr & ~mdReservedMask, // member attributes
mySig, // member signature
cSig,
methodRVA, // RVA
pMethod->m_wImplAttr, // implflags
&MethodToken)))
{
report->error("Failed to define method '%s'\n",pszMethodName);
goto exit;
}
pMethod->m_Tok = MethodToken;
//--------------------------------------------------------------------------------
// the only way to set mdRequireSecObject:
if(pMethod->m_Attr & mdRequireSecObject)
{
mdToken tkPseudoClass;
if(FAILED(m_pEmitter->DefineTypeRefByName(1, COR_REQUIRES_SECOBJ_ATTRIBUTE, &tkPseudoClass)))
report->error("Unable to define type reference '%s'\n", COR_REQUIRES_SECOBJ_ATTRIBUTE_ANSI);
else
{
mdToken tkPseudoCtor;
BYTE bSig[3] = {IMAGE_CEE_CS_CALLCONV_HASTHIS,0,ELEMENT_TYPE_VOID};
if(FAILED(m_pEmitter->DefineMemberRef(tkPseudoClass, W(".ctor"), (PCCOR_SIGNATURE)bSig, 3, &tkPseudoCtor)))
report->error("Unable to define member reference '%s::.ctor'\n", COR_REQUIRES_SECOBJ_ATTRIBUTE_ANSI);
else DefineCV(new CustomDescr(MethodToken,tkPseudoCtor,NULL));
}
}
if (pMethod->m_NumTyPars)
{
ULONG i;
mdToken tkNil = mdTokenNil;
mdGenericParam tkGP = mdTokenNil;
for(i = 0; i < pMethod->m_NumTyPars; i++)
{
if (FAILED(m_pEmitter->DefineGenericParam(MethodToken, i, pMethod->m_TyPars[i].Attrs(), pMethod->m_TyPars[i].Name(), 0, &tkNil, &tkGP)))
{
report->error("Unable to define generic param: %s'\n", pMethod->m_TyPars[i].Name());
}
else
{
pMethod->m_TyPars[i].Token(tkGP);
EmitCustomAttributes(tkGP, pMethod->m_TyPars[i].CAList());
}
}
EmitGenericParamConstraints(pMethod->m_NumTyPars, pMethod->m_TyPars, pMethod->m_Tok, &(pMethod->m_GPCList));
}
//--------------------------------------------------------------------------------
EmitSecurityInfo(MethodToken,
pMethod->m_pPermissions,
pMethod->m_pPermissionSets);
//--------------------------------------------------------------------------------
if (pMethod->m_fEntryPoint)
{
if(fIsInterface) report->error("Entrypoint in Interface: Method '%s'\n",pszMethodName);
if (FAILED(m_pCeeFileGen->SetEntryPoint(m_pCeeFile, MethodToken)))
{
report->error("Failed to set entry point for method '%s'\n",pszMethodName);
goto exit;
}
}
//--------------------------------------------------------------------------------
if(IsMdPinvokeImpl(pMethod->m_Attr))
{
if(pMethod->m_pPInvoke)
{
HRESULT hr;
if(pMethod->m_pPInvoke->szAlias == NULL) pMethod->m_pPInvoke->szAlias = pszMethodName;
hr = EmitPinvokeMap(MethodToken,pMethod->m_pPInvoke);
if(pMethod->m_pPInvoke->szAlias == pszMethodName) pMethod->m_pPInvoke->szAlias = NULL;
if(FAILED(hr))
{
report->error("Failed to set PInvoke map for method '%s'\n",pszMethodName);
goto exit;
}
}
}
{ // add parameters to metadata
void const *pValue=NULL;
ULONG cbValue;
DWORD dwCPlusTypeFlag=0;
mdParamDef pdef;
WCHAR* wzParName=&wzUniBuf[0];
char* szPhonyName=(char*)&wzUniBuf[dwUniBuf >> 1];
if(pMethod->m_dwRetAttr || pMethod->m_pRetMarshal || pMethod->m_RetCustDList.COUNT())
{
if(pMethod->m_pRetValue)
{
dwCPlusTypeFlag= (DWORD)*(pMethod->m_pRetValue->ptr());
pValue = (void const *)(pMethod->m_pRetValue->ptr()+1);
cbValue = pMethod->m_pRetValue->length()-1;
if(dwCPlusTypeFlag == ELEMENT_TYPE_STRING)
{
cbValue /= sizeof(WCHAR);
#if BIGENDIAN
void* pValueTemp = _alloca(cbValue * sizeof(WCHAR));
memcpy(pValueTemp, pValue, cbValue * sizeof(WCHAR));
pValue = pValueTemp;
SwapStringLength((WCHAR*)pValue, cbValue);
#endif
}
}
else
{
pValue = NULL;
cbValue = (ULONG)-1;
dwCPlusTypeFlag=0;
}
m_pEmitter->DefineParam(MethodToken,0,NULL,pMethod->m_dwRetAttr,dwCPlusTypeFlag,pValue,cbValue,&pdef);
if(pMethod->m_pRetMarshal)
{
if(FAILED(m_pEmitter->SetFieldMarshal (
pdef, // [IN] given a fieldDef or paramDef token
(PCCOR_SIGNATURE)(pMethod->m_pRetMarshal->ptr()), // [IN] native type specification
pMethod->m_pRetMarshal->length()))) // [IN] count of bytes of pvNativeType
report->error("Failed to set param marshaling for return\n");
}
EmitCustomAttributes(pdef, &(pMethod->m_RetCustDList));
}
for(ARG_NAME_LIST *pAN=pMethod->m_firstArgName; pAN; pAN = pAN->pNext)
{
if(pAN->nNum >= 65535)
{
report->error("Method '%s': Param.sequence number (%d) exceeds 65535, unable to define parameter\n",pszMethodName,pAN->nNum+1);
continue;
}
if(pAN->dwName) strcpy_s(szPhonyName,dwUniBuf >> 1,pAN->szName);
else sprintf_s(szPhonyName,(dwUniBuf >> 1),"A_%d",pAN->nNum);
WszMultiByteToWideChar(g_uCodePage,0,szPhonyName,-1,wzParName,dwUniBuf >> 1);
if(pAN->pValue)
{
dwCPlusTypeFlag= (DWORD)*(pAN->pValue->ptr());
pValue = (void const *)(pAN->pValue->ptr()+1);
cbValue = pAN->pValue->length()-1;
if(dwCPlusTypeFlag == ELEMENT_TYPE_STRING)
{
cbValue /= sizeof(WCHAR);
#if BIGENDIAN
void* pValueTemp = _alloca(cbValue * sizeof(WCHAR));
memcpy(pValueTemp, pValue, cbValue * sizeof(WCHAR));
pValue = pValueTemp;
SwapStringLength((WCHAR*)pValue, cbValue);
#endif
}
}
else
{
pValue = NULL;
cbValue = (ULONG)-1;
dwCPlusTypeFlag=0;
}
m_pEmitter->DefineParam(MethodToken,pAN->nNum+1,wzParName,pAN->dwAttr,dwCPlusTypeFlag,pValue,cbValue,&pdef);
if(pAN->pMarshal)
{
if(FAILED(m_pEmitter->SetFieldMarshal (
pdef, // [IN] given a fieldDef or paramDef token
(PCCOR_SIGNATURE)(pAN->pMarshal->ptr()), // [IN] native type specification
pAN->pMarshal->length()))) // [IN] count of bytes of pvNativeType
report->error("Failed to set param marshaling for '%s'\n",pAN->szName);
}
EmitCustomAttributes(pdef, &(pAN->CustDList));
}
}
fSuccess = TRUE;
//--------------------------------------------------------------------------------
// Update method implementations for this method
{
MethodImplDescriptor* pMID;
int i;
for(i=0;(pMID = pMethod->m_MethodImplDList.PEEK(i));i++)
{
pMID->m_tkImplementingMethod = MethodToken;
// don't delete it here, it's still in the general list
}
}
//--------------------------------------------------------------------------------
EmitCustomAttributes(MethodToken, &(pMethod->m_CustomDescrList));
exit:
if (fSuccess == FALSE) m_State = STATE_FAIL;
return fSuccess;
}
BOOL Assembler::EmitMethodImpls()
{
MethodImplDescriptor* pMID;
BOOL ret = TRUE;
int i;
for(i=0; (pMID = m_MethodImplDList.PEEK(i)); i++)
{
pMID->m_tkImplementingMethod = ResolveLocalMemberRef(pMID->m_tkImplementingMethod);
pMID->m_tkImplementedMethod = ResolveLocalMemberRef(pMID->m_tkImplementedMethod);
if(FAILED(m_pEmitter->DefineMethodImpl( pMID->m_tkDefiningClass,
pMID->m_tkImplementingMethod,
pMID->m_tkImplementedMethod)))
{
report->error("Failed to define Method Implementation");
ret = FALSE;
}
pMID->m_fNew = FALSE;
}// end while
return ret;
}
mdToken Assembler::ResolveLocalMemberRef(mdToken tok)
{
if(TypeFromToken(tok) == 0x99000000)
{
tok = RidFromToken(tok);
if(tok) tok = m_LocalMethodRefDList.PEEK(tok-1)->m_tkResolved;
}
else if(TypeFromToken(tok) == 0x98000000)
{
tok = RidFromToken(tok);
if(tok) tok = m_LocalFieldRefDList.PEEK(tok-1)->m_tkResolved;
}
return tok;
}
BOOL Assembler::EmitEvent(EventDescriptor* pED)
{
mdMethodDef mdAddOn=mdMethodDefNil,
mdRemoveOn=mdMethodDefNil,
mdFire=mdMethodDefNil,
*mdOthers;
int nOthers;
WCHAR* wzMemberName=&wzUniBuf[0];
if(!pED) return FALSE;
WszMultiByteToWideChar(g_uCodePage,0,pED->m_szName,-1,wzMemberName,dwUniBuf-1);
mdAddOn = ResolveLocalMemberRef(pED->m_tkAddOn);
if(TypeFromToken(mdAddOn) != mdtMethodDef)
{
report->error("Invalid Add method of event '%s'\n",pED->m_szName);
return FALSE;
}
mdRemoveOn = ResolveLocalMemberRef(pED->m_tkRemoveOn);
if(TypeFromToken(mdRemoveOn) != mdtMethodDef)
{
report->error("Invalid Remove method of event '%s'\n",pED->m_szName);
return FALSE;
}
mdFire = ResolveLocalMemberRef(pED->m_tkFire);
if((RidFromToken(mdFire)!=0)&&(TypeFromToken(mdFire) != mdtMethodDef))
{
report->error("Invalid Fire method of event '%s'\n",pED->m_szName);
return FALSE;
}
nOthers = pED->m_tklOthers.COUNT();
mdOthers = new mdMethodDef[nOthers+1];
if(mdOthers == NULL)
{
report->error("Failed to allocate Others array for event descriptor\n");
nOthers = 0;
}
for(int j=0; j < nOthers; j++)
{
mdOthers[j] = ResolveLocalMemberRef((mdToken)(UINT_PTR)(pED->m_tklOthers.PEEK(j))); // @WARNING: casting down from 'mdToken*' to 'mdToken'
}
mdOthers[nOthers] = mdMethodDefNil; // like null-terminator
if(FAILED(m_pEmitter->DefineEvent( pED->m_tdClass,
wzMemberName,
pED->m_dwAttr,
pED->m_tkEventType,
mdAddOn,
mdRemoveOn,
mdFire,
mdOthers,
&(pED->m_edEventTok))))
{
report->error("Failed to define event '%s'.\n",pED->m_szName);
delete [] mdOthers;
return FALSE;
}
EmitCustomAttributes(pED->m_edEventTok, &(pED->m_CustomDescrList));
return TRUE;
}
BOOL Assembler::EmitProp(PropDescriptor* pPD)
{
mdMethodDef mdSet, mdGet, *mdOthers;
int nOthers;
WCHAR* wzMemberName=&wzUniBuf[0];
if(!pPD) return FALSE;
WszMultiByteToWideChar(g_uCodePage,0,pPD->m_szName,-1,wzMemberName,dwUniBuf-1);
mdSet = ResolveLocalMemberRef(pPD->m_tkSet);
if((RidFromToken(mdSet)!=0)&&(TypeFromToken(mdSet) != mdtMethodDef))
{
report->error("Invalid Set method of property '%s'\n",pPD->m_szName);
return FALSE;
}
mdGet = ResolveLocalMemberRef(pPD->m_tkGet);
if((RidFromToken(mdGet)!=0)&&(TypeFromToken(mdGet) != mdtMethodDef))
{
report->error("Invalid Get method of property '%s'\n",pPD->m_szName);
return FALSE;
}
nOthers = pPD->m_tklOthers.COUNT();
mdOthers = new mdMethodDef[nOthers+1];
if(mdOthers == NULL)
{
report->error("Failed to allocate Others array for prop descriptor\n");
nOthers = 0;
}
for(int j=0; j < nOthers; j++)
{
mdOthers[j] = ResolveLocalMemberRef((mdToken)(UINT_PTR)(pPD->m_tklOthers.PEEK(j))); // @WARNING: casting down from 'mdToken*' to 'mdToken'
if((RidFromToken(mdOthers[j])!=0)&&(TypeFromToken(mdOthers[j]) != mdtMethodDef))
{
report->error("Invalid Other method of property '%s'\n",pPD->m_szName);
delete [] mdOthers;
return FALSE;
}
}
mdOthers[nOthers] = mdMethodDefNil; // like null-terminator
void* pValue = pPD->m_pValue;
#if BIGENDIAN
if (pPD->m_dwCPlusTypeFlag == ELEMENT_TYPE_STRING)
{
void* pValueTemp = _alloca(pPD->m_cbValue * sizeof(WCHAR));
memcpy(pValueTemp, pValue, pPD->m_cbValue * sizeof(WCHAR));
pValue = pValueTemp;
SwapStringLength((WCHAR*)pValue, pPD->m_cbValue);
}
#endif
if(FAILED(m_pEmitter->DefineProperty( pPD->m_tdClass,
wzMemberName,
pPD->m_dwAttr,
pPD->m_pSig,
pPD->m_dwCSig,
pPD->m_dwCPlusTypeFlag,
pValue,
pPD->m_cbValue,
mdSet,
mdGet,
mdOthers,
&(pPD->m_pdPropTok))))
{
report->error("Failed to define property '%s'.\n",pPD->m_szName);
delete [] mdOthers;
return FALSE;
}
EmitCustomAttributes(pPD->m_pdPropTok, &(pPD->m_CustomDescrList));
return TRUE;
}
Class *Assembler::FindCreateClass(_In_ __nullterminated const char *pszFQN)
{
Class *pSearch = NULL;
if(pszFQN)
{
dummyClass->m_szFQN = pszFQN;
dummyClass->m_Hash = hash((BYTE*)pszFQN, (unsigned)strlen(pszFQN), 10);
pSearch = m_hshClass.FIND(dummyClass);
dummyClass->m_szFQN = NULL;
dummyClass->m_Hash = 0;
if(!pSearch)
{
char* pch;
DWORD dwFQN = (DWORD)strlen(pszFQN);
Class *pEncloser = NULL;
char* pszNewFQN = new char[dwFQN+1];
strcpy_s(pszNewFQN,dwFQN+1,pszFQN);
if((pch = strrchr(pszNewFQN, NESTING_SEP)) != NULL)
{
*pch = 0;
pEncloser = FindCreateClass(pszNewFQN);
*pch = NESTING_SEP;
}
pSearch = new Class(pszNewFQN);
if (pSearch == NULL)
report->error("Failed to create class '%s'\n",pszNewFQN);
else
{
pSearch->m_pEncloser = pEncloser;
m_lstClass.PUSH(pSearch);
pSearch->m_cl = mdtTypeDef | m_lstClass.COUNT();
m_hshClass.PUSH(pSearch);
}
}
}
return pSearch;
}
BOOL Assembler::EmitClass(Class *pClass)
{
LPCUTF8 szFullName;
WCHAR* wzFullName=&wzUniBuf[0];
HRESULT hr = E_FAIL;
GUID guid;
size_t L;
mdToken tok;
if(pClass == NULL) return FALSE;
hr = CoCreateGuid(&guid);
if (FAILED(hr))
{
printf("Unable to create GUID\n");
m_State = STATE_FAIL;
return FALSE;
}
if(pClass->m_pEncloser)
szFullName = strrchr(pClass->m_szFQN,NESTING_SEP) + 1;
else
szFullName = pClass->m_szFQN;
WszMultiByteToWideChar(g_uCodePage,0,szFullName,-1,wzFullName,dwUniBuf);
L = wcslen(wzFullName);
if((L==0)||(wzFullName[L-1]==L'.')) // Missing class name!
{
wcscat_s(wzFullName,dwUniBuf,W("$UNNAMED_TYPE$"));
}
pClass->m_Attr = CheckClassFlagsIfNested(pClass->m_pEncloser, pClass->m_Attr);
if (pClass->m_pEncloser)
{
hr = m_pEmitter->DefineNestedType( wzFullName,
pClass->m_Attr, // attributes
pClass->m_crExtends, // CR extends class
pClass->m_crImplements,// implements
pClass->m_pEncloser->m_cl, // Enclosing class.
&tok);
}
else
{
hr = m_pEmitter->DefineTypeDef( wzFullName,
pClass->m_Attr, // attributes
pClass->m_crExtends, // CR extends class
pClass->m_crImplements,// implements
&tok);
}
_ASSERTE(tok == pClass->m_cl);
if (FAILED(hr)) goto exit;
if (pClass->m_NumTyPars)
{
ULONG i;
mdToken tkNil = mdTokenNil;
mdGenericParam tkGP = mdTokenNil;
for(i = 0; i < pClass->m_NumTyPars; i++)
{
if (FAILED(m_pEmitter->DefineGenericParam(pClass->m_cl, i, pClass->m_TyPars[i].Attrs(), pClass->m_TyPars[i].Name(), 0, &tkNil, &tkGP)))
{
report->error("Unable to define generic param: %s'\n", pClass->m_TyPars[i].Name());
}
else
{
pClass->m_TyPars[i].Token(tkGP);
EmitCustomAttributes(tkGP, pClass->m_TyPars[i].CAList());
}
}
EmitGenericParamConstraints(pClass->m_NumTyPars, pClass->m_TyPars, pClass->m_cl, &(pClass->m_GPCList));
}
EmitCustomAttributes(pClass->m_cl, &(pClass->m_CustDList));
hr = S_OK;
exit:
return SUCCEEDED(hr);
}
BOOL Assembler::DoGlobalFixups()
{
GlobalFixup *pSearch;
for (int i=0; (pSearch = m_lstGlobalFixup.PEEK(i)); i++)
{
GlobalLabel * pLabel = FindGlobalLabel(pSearch->m_szLabel);
if (pLabel == NULL)
{
report->error("Unable to find forward reference global label '%s'\n",
pSearch->m_szLabel);
m_State = STATE_FAIL;
return FALSE;
}
//BYTE * pReference = pSearch->m_pReference;
//DWORD GlobalOffset = pLabel->m_GlobalOffset;
//memcpy(pReference,&GlobalOffset,4);
SET_UNALIGNED_VAL32(pSearch->m_pReference,pLabel->m_GlobalOffset);
}
return TRUE;
}
state_t Assembler::AddGlobalLabel(_In_ __nullterminated char *pszName, HCEESECTION section)
{
if (FindGlobalLabel(pszName) != NULL)
{
report->error("Duplicate global label '%s'\n", pszName);
m_State = STATE_FAIL;
return m_State;
}
ULONG GlobalOffset;
HRESULT hr;
hr = m_pCeeFileGen->GetSectionDataLen(section, &GlobalOffset);
_ASSERTE(SUCCEEDED(hr));
GlobalLabel *pNew = new GlobalLabel(pszName, GlobalOffset, section);
if (pNew == 0)
{
report->error("Failed to allocate global label '%s'\n",pszName);
m_State = STATE_FAIL;
return m_State;
}
m_lstGlobalLabel.PUSH(pNew);
return m_State;
}
void Assembler::AddLabel(DWORD CurPC, _In_ __nullterminated char *pszName)
{
if (m_pCurMethod->FindLabel(pszName) != NULL)
{
report->error("Duplicate label: '%s'\n", pszName);
m_State = STATE_FAIL;
}
else
{
Label *pNew = new Label(pszName, CurPC);
if (pNew != NULL)
//m_pCurMethod->m_lstLabel.PUSH(pNew);
m_lstLabel.PUSH(pNew);
else
{
report->error("Failed to allocate label '%s'\n",pszName);
m_State = STATE_FAIL;
}
}
}
void Assembler::DoDeferredILFixups(Method* pMethod)
{ // Now that we know where in the file the code bytes will wind up,
// we can update the RVAs and offsets.
ILFixup *pSearch;
HRESULT hr;
GlobalFixup *Fix = NULL;
int i;
for (i=0;(pSearch = pMethod->m_lstILFixup.PEEK(i));i++)
{
switch(pSearch->m_Kind)
{
case ilGlobal:
Fix = pSearch->m_Fixup;
_ASSERTE(Fix != NULL);
Fix->m_pReference = pMethod->m_pCode+pSearch->m_OffsetInMethod;
break;
case ilToken:
hr = m_pCeeFileGen->AddSectionReloc(m_pILSection,
pSearch->m_OffsetInMethod+pMethod->m_methodOffset,
m_pILSection,
srRelocMapToken);
_ASSERTE(SUCCEEDED(hr));
break;
case ilRVA:
hr = m_pCeeFileGen->AddSectionReloc(m_pILSection,
pSearch->m_OffsetInMethod+pMethod->m_methodOffset,
m_pGlobalDataSection,
srRelocAbsolute);
_ASSERTE(SUCCEEDED(hr));
break;
default:
;
}
}
}
/**************************************************************************/
BOOL Assembler::DoFixups(Method* pMethod)
{
Fixup *pSearch;
for (int i=0; (pSearch = pMethod->m_lstFixup.PEEK(i)); i++)
{
Label * pLabel = pMethod->FindLabel(pSearch->m_szLabel);
long offset;
if (pLabel == NULL)
{
report->error("Unable to find forward reference label '%s' called from PC=%d\n",
pSearch->m_szLabel, pSearch->m_RelativeToPC);
//m_State = STATE_FAIL;
return FALSE;
}
offset = pLabel->m_PC - pSearch->m_RelativeToPC;
if (pSearch->m_FixupSize == 1)
{
if (offset > 127 || offset < -128)
{
report->error("Offset of forward reference label '%s' called from PC=%d is too large for 1 byte pcrel\n",
pLabel->m_szName, pSearch->m_RelativeToPC);
//m_State = STATE_FAIL;
return FALSE;
}
*pSearch->m_pBytes = (BYTE) offset;
}
else if (pSearch->m_FixupSize == 4)
{
SET_UNALIGNED_VAL32(pSearch->m_pBytes,offset);
}
}
return TRUE;
}
OPCODE Assembler::DecodeOpcode(const BYTE *pCode, DWORD *pdwLen)
{
OPCODE opcode;
*pdwLen = 1;
opcode = OPCODE(pCode[0]);
switch(opcode) {
case CEE_PREFIX1:
opcode = OPCODE(pCode[1] + 256);
if (opcode < 0 || opcode >= CEE_COUNT)
return CEE_COUNT;
*pdwLen = 2;
break;
case CEE_PREFIXREF:
case CEE_PREFIX2:
case CEE_PREFIX3:
case CEE_PREFIX4:
case CEE_PREFIX5:
case CEE_PREFIX6:
case CEE_PREFIX7:
return CEE_COUNT;
default:
break;
}
return opcode;
}
char* Assembler::ReflectionNotation(mdToken tk)
{
char *sz = (char*)&wzUniBuf[dwUniBuf>>1], *pc;
*sz=0;
switch(TypeFromToken(tk))
{
case mdtTypeDef:
{
Class *pClass = m_lstClass.PEEK(RidFromToken(tk)-1);
if(pClass)
{
strcpy_s(sz,dwUniBuf>>1,pClass->m_szFQN);
pc = sz;
while((pc = strchr(pc,NESTING_SEP)) != NULL)
{
*pc = '+';
pc++;
}
}
}
break;
case mdtTypeRef:
{
ULONG N;
mdToken tkResScope;
if(SUCCEEDED(m_pImporter->GetTypeRefProps(tk,&tkResScope,wzUniBuf,dwUniBuf>>1,&N)))
{
WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,sz,dwUniBuf>>1,NULL,NULL);
if(TypeFromToken(tkResScope)==mdtAssemblyRef)
{
AsmManAssembly *pAsmRef = m_pManifest->m_AsmRefLst.PEEK(RidFromToken(tkResScope)-1);
if(pAsmRef)
{
pc = &sz[strlen(sz)];
pc+=sprintf_s(pc,(dwUniBuf >> 1),", %s, Version=%d.%d.%d.%d, Culture=",pAsmRef->szName,
pAsmRef->usVerMajor,pAsmRef->usVerMinor,pAsmRef->usBuild,pAsmRef->usRevision);
ULONG L=0;
if(pAsmRef->pLocale && (L=pAsmRef->pLocale->length()))
{
memcpy(wzUniBuf,pAsmRef->pLocale->ptr(),L);
wzUniBuf[L>>1] = 0;
WszWideCharToMultiByte(CP_UTF8,0,wzUniBuf,-1,pc,dwUniBuf>>1,NULL,NULL);
}
else pc+=sprintf_s(pc,(dwUniBuf >> 1),"neutral");
pc = &sz[strlen(sz)];
if(pAsmRef->pPublicKeyToken && (L=pAsmRef->pPublicKeyToken->length()))
{
pc+=sprintf_s(pc,(dwUniBuf >> 1),", Publickeytoken=");
BYTE* pb = (BYTE*)(pAsmRef->pPublicKeyToken->ptr());
for(N=0; N<L; N++,pb++) pc+=sprintf_s(pc,(dwUniBuf >> 1),"%2.2x",*pb);
}
}
}
}
}
break;
default:
break;
}
return sz;
}
/*
--------------------------------------------------------------------
mix -- mix 3 32-bit values reversibly.
For every delta with one or two bits set, and the deltas of all three
high bits or all three low bits, whether the original value of a,b,c
is almost all zero or is uniformly distributed,
* If mix() is run forward or backward, at least 32 bits in a,b,c
have at least 1/4 probability of changing.
* If mix() is run forward, every bit of c will change between 1/3 and
2/3 of the time. (Well, 22/100 and 78/100 for some 2-bit deltas.)
mix() was built out of 36 single-cycle latency instructions in a
structure that could support 2x parallelism, like so:
a -= b;
a -= c; x = (c>>13);
b -= c; a ^= x;
b -= a; x = (a<<8);
c -= a; b ^= x;
c -= b; x = (b>>13);
...
Unfortunately, superscalar Pentiums and Sparcs can't take advantage
of that parallelism. They've also turned some of those single-cycle
latency instructions into multi-cycle latency instructions. Still,
this is the fastest good hash I could find. There were about 2^^68
to choose from. I only looked at a billion or so.
--------------------------------------------------------------------
*/
#define mix(a,b,c) \
{ \
a -= b; a -= c; a ^= (c >> 13); \
b -= c; b -= a; b ^= (a << 8); \
c -= a; c -= b; c ^= (b >> 13); \
a -= b; a -= c; a ^= (c >> 12); \
b -= c; b -= a; b ^= (a << 16); \
c -= a; c -= b; c ^= (b >> 5); \
a -= b; a -= c; a ^= (c >> 3); \
b -= c; b -= a; b ^= (a << 10); \
c -= a; c -= b; c ^= (b >> 15); \
}
/*
--------------------------------------------------------------------
hash() -- hash a variable-length key into a 32-bit value
k : the key (the unaligned variable-length array of bytes)
len : the length of the key, counting by bytes
initval : can be any 4-byte value
Returns a 32-bit value. Every bit of the key affects every bit of
the return value. Every 1-bit and 2-bit delta achieves avalanche.
About 6*len+35 instructions.
The best hash table sizes are powers of 2. There is no need to do
mod a prime (mod is sooo slow!). If you need less than 32 bits,
use a bitmask. For example, if you need only 10 bits, do
h = (h & hashmask(10));
In which case, the hash table should have hashsize(10) elements.
If you are hashing n strings (ub1 **)k, do it like this:
for (i=0, h=0; i<n; ++i) h = hash( k[i], len[i], h);
By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. You may use this
code any way you wish, private, educational, or commercial. It's free.
See http://burtleburtle.net/bob/hash/evahash.html
Use for hash table lookup, or anything where one collision in 2^^32 is
acceptable. Do NOT use for cryptographic purposes.
--------------------------------------------------------------------
*/
unsigned hash(
_In_reads_(length) const BYTE *k, /* the key */
unsigned length, /* the length of the key */
unsigned initval) /* the previous hash, or an arbitrary value */
{
register unsigned a,b,c,len;
/* Set up the internal state */
len = length;
a = b = 0x9e3779b9; /* the golden ratio; an arbitrary value */
c = initval; /* the previous hash value */
/*---------------------------------------- handle most of the key */
while (len >= 12)
{
a += (k[0] + ((unsigned)k[1] << 8) + ((unsigned)k[2] << 16) + ((unsigned)k[3] << 24));
b += (k[4] + ((unsigned)k[5] << 8) + ((unsigned)k[6] << 16) + ((unsigned)k[7] << 24));
c += (k[8] + ((unsigned)k[9] << 8) + ((unsigned)k[10] << 16) + ((unsigned)k[11] << 24));
mix(a,b,c);
k += 12; len -= 12;
}
/*------------------------------------- handle the last 11 bytes */
c += length;
switch(len) /* all the case statements fall through */
{
case 11: c+=((unsigned)k[10] << 24);
FALLTHROUGH;
case 10: c+=((unsigned)k[9] << 16);
FALLTHROUGH;
case 9 : c+=((unsigned)k[8] << 8);
FALLTHROUGH;
/* the first byte of c is reserved for the length */
case 8 : b+=((unsigned)k[7] << 24);
FALLTHROUGH;
case 7 : b+=((unsigned)k[6] << 16);
FALLTHROUGH;
case 6 : b+=((unsigned)k[5] << 8);
FALLTHROUGH;
case 5 : b+=k[4];
FALLTHROUGH;
case 4 : a+=((unsigned)k[3] << 24);
FALLTHROUGH;
case 3 : a+=((unsigned)k[2] << 16);
FALLTHROUGH;
case 2 : a+=((unsigned)k[1] << 8);
FALLTHROUGH;
case 1 : a+=k[0];
/* case 0: nothing left to add */
}
mix(a,b,c);
/*-------------------------------------------- report the result */
return c;
}
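/*
  Usage sketch — not part of the original source. The comment block above
  describes hashing n strings by feeding each call's result back in as the
  initval of the next call; the helper below merely restates that pattern.
  Its name, inputs and seed are illustrative assumptions, and it assumes
  <string.h> is available for strlen.
*/
static unsigned hash_strings(const char** keys, unsigned n)
{
    unsigned h = 0;                      /* arbitrary starting value */
    for (unsigned i = 0; i < n; i++)
        h = hash((const BYTE*)keys[i], (unsigned)strlen(keys[i]), h);
    /* for a power-of-2 table, mask the result as the comment above suggests */
    return h;
}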
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/pal/inc/rt/conio.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: conio.h
//
// ===========================================================================
// dummy conio.h for PAL
#include "palrt.h"
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// ===========================================================================
// File: conio.h
//
// ===========================================================================
// dummy conio.h for PAL
#include "palrt.h"
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/native/libs/Common/pal_safecrt.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_compiler.h"
#include "pal_config.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
// Multiplies a and b into result.
// Returns true if safe, false if overflows.
inline static bool multiply_s(size_t a, size_t b, size_t* result)
{
#if HAVE_BUILTIN_MUL_OVERFLOW
return !__builtin_mul_overflow(a, b, result);
#else
if (a == 0 || b == 0)
{
*result = 0;
return true;
}
if(((size_t)~((size_t)0)) / a < b)
{
//overflow
return false;
}
//ok
*result = a * b;
return true;
#endif
}
// Adds a and b into result.
// Returns true if safe, false if overflows.
inline static bool add_s(size_t a, size_t b, size_t* result)
{
#if HAVE_BUILTIN_MUL_OVERFLOW
return !__builtin_add_overflow(a, b, result);
#else
if(((size_t)~((size_t)0)) - a < b)
{
//overflow
return false;
}
//ok
*result = a + b;
return true;
#endif
}
typedef int errno_t;
inline static errno_t memcpy_s(void* dst, size_t sizeInBytes, const void* src, size_t count)
{
if (count > 0)
{
assert(dst != NULL);
assert(src != NULL);
assert(sizeInBytes >= count);
assert( // should be using memmove if this fails
((const char*)dst + count <= (const char*)src) ||
((const char*)src + count <= (const char*)dst));
if (dst == NULL)
{
return EINVAL;
}
if (src == NULL || sizeInBytes < count)
{
memset(dst, 0, sizeInBytes);
return src == NULL ? EINVAL : ERANGE;
}
memcpy(dst, src, count);
}
return 0;
}
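// Usage sketch — not part of the original header. multiply_s/add_s exist to
// guard size arithmetic before a buffer is allocated and filled via memcpy_s;
// the helper below illustrates that pattern. Its name and shape are
// assumptions (not taken from the PAL sources), and it additionally needs
// <stdlib.h> for malloc/free.
inline static void* duplicate_elements(const void* src, size_t count, size_t elemSize)
{
    size_t bytes;
    if (!multiply_s(count, elemSize, &bytes)) // reject overflow in count * elemSize
    {
        return NULL;
    }
    void* dst = malloc(bytes);
    if (dst != NULL && memcpy_s(dst, bytes, src, bytes) != 0)
    {
        free(dst); // copy was rejected (e.g. src == NULL); don't leak the buffer
        dst = NULL;
    }
    return dst;
}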
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_compiler.h"
#include "pal_config.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
// Multiplies a and b into result.
// Returns true if safe, false if overflows.
inline static bool multiply_s(size_t a, size_t b, size_t* result)
{
#if HAVE_BUILTIN_MUL_OVERFLOW
return !__builtin_mul_overflow(a, b, result);
#else
if (a == 0 || b == 0)
{
*result = 0;
return true;
}
if(((size_t)~((size_t)0)) / a < b)
{
//overflow
return false;
}
//ok
*result = a * b;
return true;
#endif
}
// Adds a and b into result.
// Returns true if safe, false if overflows.
inline static bool add_s(size_t a, size_t b, size_t* result)
{
#if HAVE_BUILTIN_MUL_OVERFLOW
return !__builtin_add_overflow(a, b, result);
#else
if(((size_t)~((size_t)0)) - a < b)
{
//overflow
return false;
}
//ok
*result = a + b;
return true;
#endif
}
typedef int errno_t;
inline static errno_t memcpy_s(void* dst, size_t sizeInBytes, const void* src, size_t count)
{
if (count > 0)
{
assert(dst != NULL);
assert(src != NULL);
assert(sizeInBytes >= count);
assert( // should be using memmove if this fails
((const char*)dst + count <= (const char*)src) ||
((const char*)src + count <= (const char*)dst));
if (dst == NULL)
{
return EINVAL;
}
if (src == NULL || sizeInBytes < count)
{
memset(dst, 0, sizeInBytes);
return src == NULL ? EINVAL : ERANGE;
}
memcpy(dst, src, count);
}
return 0;
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/pal/tests/palsuite/c_runtime/vprintf/test14/test14.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test14.c
**
** Purpose: Test #14 for the vprintf function. Tests the lowercase
** exponential notation double specifier (%e)
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../vprintf.h"
PALTEST(c_runtime_vprintf_test14_paltest_vprintf_test14, "c_runtime/vprintf/test14/paltest_vprintf_test14")
{
double val = 256.0;
double neg = -256.0;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoDoubleTest("foo %e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %le", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %he", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %Le", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %I64e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %14e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %-14e", val, "foo 2.560000e+002 ",
"foo 2.560000e+02 ");
DoDoubleTest("foo %.1e", val, "foo 2.6e+002", "foo 2.6e+02");
DoDoubleTest("foo %.8e", val, "foo 2.56000000e+002",
"foo 2.56000000e+02");
DoDoubleTest("foo %014e", val, "foo 02.560000e+002",
"foo 002.560000e+02");
DoDoubleTest("foo %#e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %+e", val, "foo +2.560000e+002", "foo +2.560000e+02");
DoDoubleTest("foo % e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %+e", neg, "foo -2.560000e+002", "foo -2.560000e+02");
DoDoubleTest("foo % e", neg, "foo -2.560000e+002", "foo -2.560000e+02");
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test14.c
**
** Purpose: Test #14 for the vprintf function. Tests the lowercase
** exponential notation double specifier (%e)
**
**
**==========================================================================*/
#include <palsuite.h>
#include "../vprintf.h"
PALTEST(c_runtime_vprintf_test14_paltest_vprintf_test14, "c_runtime/vprintf/test14/paltest_vprintf_test14")
{
double val = 256.0;
double neg = -256.0;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
DoDoubleTest("foo %e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %le", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %he", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %Le", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %I64e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %14e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %-14e", val, "foo 2.560000e+002 ",
"foo 2.560000e+02 ");
DoDoubleTest("foo %.1e", val, "foo 2.6e+002", "foo 2.6e+02");
DoDoubleTest("foo %.8e", val, "foo 2.56000000e+002",
"foo 2.56000000e+02");
DoDoubleTest("foo %014e", val, "foo 02.560000e+002",
"foo 002.560000e+02");
DoDoubleTest("foo %#e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %+e", val, "foo +2.560000e+002", "foo +2.560000e+02");
DoDoubleTest("foo % e", val, "foo 2.560000e+002", "foo 2.560000e+02");
DoDoubleTest("foo %+e", neg, "foo -2.560000e+002", "foo -2.560000e+02");
DoDoubleTest("foo % e", neg, "foo -2.560000e+002", "foo -2.560000e+02");
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/mono/mono/mini/seq-points.h | /**
* \file
* Copyright 2014 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SEQ_POINTS_H__
#define __MONO_SEQ_POINTS_H__
#include <mono/metadata/seq-points-data.h>
void
mono_save_seq_point_info (MonoCompile *cfg, MonoJitInfo *jinfo);
MONO_COMPONENT_API MonoSeqPointInfo*
mono_get_seq_points (MonoMethod *method);
MONO_COMPONENT_API gboolean
mono_find_next_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_prev_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_seq_point (MonoMethod *method, gint32 il_offset, MonoSeqPointInfo **info, SeqPoint *seq_point);
void
mono_bb_deduplicate_op_il_seq_points (MonoCompile *cfg, MonoBasicBlock *bb);
#endif /* __MONO_SEQ_POINTS_H__ */
| /**
* \file
* Copyright 2014 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef __MONO_SEQ_POINTS_H__
#define __MONO_SEQ_POINTS_H__
#include <mono/metadata/seq-points-data.h>
void
mono_save_seq_point_info (MonoCompile *cfg, MonoJitInfo *jinfo);
MONO_COMPONENT_API MonoSeqPointInfo*
mono_get_seq_points (MonoMethod *method);
MONO_COMPONENT_API gboolean
mono_find_next_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_prev_seq_point_for_native_offset (MonoMethod *method, gint32 native_offset, MonoSeqPointInfo **info, SeqPoint* seq_point);
MONO_COMPONENT_API gboolean
mono_find_seq_point (MonoMethod *method, gint32 il_offset, MonoSeqPointInfo **info, SeqPoint *seq_point);
void
mono_bb_deduplicate_op_il_seq_points (MonoCompile *cfg, MonoBasicBlock *bb);
#endif /* __MONO_SEQ_POINTS_H__ */
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/pal/tests/palsuite/locale_info/GetACP/test1/test1.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests that GetACP returns the expected default code page.
**
**
**==========================================================================*/
#include <palsuite.h>
/*
* NOTE: We only support code page 65001 (UTF-8).
*/
#define EXPECTED_CP 65001
PALTEST(locale_info_GetACP_test1_paltest_getacp_test1, "locale_info/GetACP/test1/paltest_getacp_test1")
{
int ret;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = GetACP();
if (ret != EXPECTED_CP)
{
Fail("ERROR: got incorrect result for current ANSI code page!\n"
"Expected %d, got %d\n", EXPECTED_CP, ret);
}
PAL_Terminate();
return PASS;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose: Tests that GetACP returns the expected default code page.
**
**
**==========================================================================*/
#include <palsuite.h>
/*
* NOTE: We only support code page 65001 (UTF-8).
*/
#define EXPECTED_CP 65001
PALTEST(locale_info_GetACP_test1_paltest_getacp_test1, "locale_info/GetACP/test1/paltest_getacp_test1")
{
int ret;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = GetACP();
if (ret != EXPECTED_CP)
{
Fail("ERROR: got incorrect result for current ANSI code page!\n"
"Expected %d, got %d\n", EXPECTED_CP, ret);
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/native/corehost/fxr/corehost_init.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __COREHOST_INIT_H__
#define __COREHOST_INIT_H__
#include "host_interface.h"
#include "host_startup_info.h"
#include "fx_definition.h"
class corehost_init_t
{
private:
std::vector<pal::string_t> m_clr_keys;
std::vector<pal::string_t> m_clr_values;
std::vector<const pal::char_t*> m_clr_keys_cstr;
std::vector<const pal::char_t*> m_clr_values_cstr;
const pal::string_t m_tfm;
const pal::string_t m_deps_file;
const pal::string_t m_additional_deps_serialized;
bool m_is_framework_dependent;
std::vector<pal::string_t> m_probe_paths;
std::vector<const pal::char_t*> m_probe_paths_cstr;
host_mode_t m_host_mode;
host_interface_t m_host_interface;
std::vector<pal::string_t> m_fx_names;
std::vector<const pal::char_t*> m_fx_names_cstr;
std::vector<pal::string_t> m_fx_dirs;
std::vector<const pal::char_t*> m_fx_dirs_cstr;
std::vector<pal::string_t> m_fx_requested_versions;
std::vector<const pal::char_t*> m_fx_requested_versions_cstr;
std::vector<pal::string_t> m_fx_found_versions;
std::vector<const pal::char_t*> m_fx_found_versions_cstr;
fx_reference_vector_t m_included_frameworks;
const pal::string_t m_host_command;
const pal::string_t m_host_info_host_path;
const pal::string_t m_host_info_dotnet_root;
const pal::string_t m_host_info_app_path;
public:
corehost_init_t(
const pal::string_t& host_command,
const host_startup_info_t& host_info,
const pal::string_t& deps_file,
const pal::string_t& additional_deps_serialized,
const std::vector<pal::string_t>& probe_paths,
const host_mode_t mode,
const fx_definition_vector_t& fx_definitions,
const std::vector<std::pair<pal::string_t, pal::string_t>>& additional_properties);
const host_interface_t& get_host_init_data();
void get_found_fx_versions(std::unordered_map<pal::string_t, const fx_ver_t> &out_fx_versions) const;
void get_included_frameworks(std::unordered_map<pal::string_t, const fx_ver_t>& out_included_frameworks) const;
};
#endif // __COREHOST_INIT_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __COREHOST_INIT_H__
#define __COREHOST_INIT_H__
#include "host_interface.h"
#include "host_startup_info.h"
#include "fx_definition.h"
class corehost_init_t
{
private:
std::vector<pal::string_t> m_clr_keys;
std::vector<pal::string_t> m_clr_values;
std::vector<const pal::char_t*> m_clr_keys_cstr;
std::vector<const pal::char_t*> m_clr_values_cstr;
const pal::string_t m_tfm;
const pal::string_t m_deps_file;
const pal::string_t m_additional_deps_serialized;
bool m_is_framework_dependent;
std::vector<pal::string_t> m_probe_paths;
std::vector<const pal::char_t*> m_probe_paths_cstr;
host_mode_t m_host_mode;
host_interface_t m_host_interface;
std::vector<pal::string_t> m_fx_names;
std::vector<const pal::char_t*> m_fx_names_cstr;
std::vector<pal::string_t> m_fx_dirs;
std::vector<const pal::char_t*> m_fx_dirs_cstr;
std::vector<pal::string_t> m_fx_requested_versions;
std::vector<const pal::char_t*> m_fx_requested_versions_cstr;
std::vector<pal::string_t> m_fx_found_versions;
std::vector<const pal::char_t*> m_fx_found_versions_cstr;
fx_reference_vector_t m_included_frameworks;
const pal::string_t m_host_command;
const pal::string_t m_host_info_host_path;
const pal::string_t m_host_info_dotnet_root;
const pal::string_t m_host_info_app_path;
public:
corehost_init_t(
const pal::string_t& host_command,
const host_startup_info_t& host_info,
const pal::string_t& deps_file,
const pal::string_t& additional_deps_serialized,
const std::vector<pal::string_t>& probe_paths,
const host_mode_t mode,
const fx_definition_vector_t& fx_definitions,
const std::vector<std::pair<pal::string_t, pal::string_t>>& additional_properties);
const host_interface_t& get_host_init_data();
void get_found_fx_versions(std::unordered_map<pal::string_t, const fx_ver_t> &out_fx_versions) const;
void get_included_frameworks(std::unordered_map<pal::string_t, const fx_ver_t>& out_included_frameworks) const;
};
#endif // __COREHOST_INIT_H__
| -1 |
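
The PR description repeated in these rows is about folding `IND(struct ADDR(LCL))` trees into direct local accesses at the address level, where the folder is free to pick the type of the resulting location node. The sketch below is a deliberately simplified, self-contained toy model of that shape rewrite: `ToyOp`, `ToyNode` and `FoldIndirOfLocalAddr` are invented names and none of this is RyuJIT code; it only mirrors the tree shape the description talks about.

```cpp
#include <cstdio>
#include <memory>
#include <utility>

// Toy IR: just enough structure to show the shape of the transformation.
enum class ToyOp { LclVar, Addr, Indir, LclFld };

struct ToyNode {
    ToyOp op = ToyOp::LclVar;
    int lclNum = -1;              // for LclVar / LclFld
    unsigned offset = 0;          // for LclFld
    const char* type = "struct";  // type of the access, chosen by the folder
    std::unique_ptr<ToyNode> operand;
};

// If the tree has the shape Indir(Addr(LclVar)), rewrite it as a direct local
// field access whose type is chosen by the caller rather than being forced to
// a layout-less struct.
std::unique_ptr<ToyNode> FoldIndirOfLocalAddr(std::unique_ptr<ToyNode> tree, const char* accessType)
{
    if (tree->op == ToyOp::Indir && tree->operand && tree->operand->op == ToyOp::Addr &&
        tree->operand->operand && tree->operand->operand->op == ToyOp::LclVar)
    {
        auto folded = std::make_unique<ToyNode>();
        folded->op = ToyOp::LclFld;
        folded->lclNum = tree->operand->operand->lclNum;
        folded->offset = 0;
        folded->type = accessType;  // e.g. "int" when the indirection reads 4 bytes
        return folded;
    }
    return tree;  // no folding opportunity
}

int main()
{
    auto lcl = std::make_unique<ToyNode>();
    lcl->op = ToyOp::LclVar;
    lcl->lclNum = 3;

    auto addr = std::make_unique<ToyNode>();
    addr->op = ToyOp::Addr;
    addr->operand = std::move(lcl);

    auto indir = std::make_unique<ToyNode>();
    indir->op = ToyOp::Indir;
    indir->operand = std::move(addr);

    auto result = FoldIndirOfLocalAddr(std::move(indir), "int");
    std::printf("folded: op=LclFld lclNum=%d type=%s\n", result->lclNum, result->type);
    return 0;
}
```

The point the description makes is preserved here: because the rewrite happens where the address is folded, the caller chooses a concrete access type instead of ending up with a layout-less `TYP_STRUCT` local field.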
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/vm/peimage.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// --------------------------------------------------------------------------------
// PEImage.h
//
// --------------------------------------------------------------------------------
#ifndef PEIMAGE_H_
#define PEIMAGE_H_
// --------------------------------------------------------------------------------
// Required headers
// --------------------------------------------------------------------------------
#include "clrtypes.h"
#include "peimagelayout.h"
#include "sstring.h"
#include "holder.h"
#include <bundle.h>
class SimpleRWLock;
// --------------------------------------------------------------------------------
// Forward declarations
// --------------------------------------------------------------------------------
class Crst;
// --------------------------------------------------------------------------------
// PEImage is a PE file loaded into memory.
//
// The actual data is represented by PEImageLayout instances which are created on demand.
//
// Various PEImageLayouts can be classified into two kinds -
// - Flat - the same layout as on disk/array or
//
// - Loaded - PE sections are mapped into virtual addresses.
// PE relocations are applied.
// Native exception handlers are registered with OS (on Windows).
//
// Flat layouts are sufficient for operations that do not require running native code,
// Anything based on RVA, such as retrieving IL method bodies, is slightly less efficient,
// since RVA must be translated to file offsets by iterating through section headers.
// The additional cost is not very high though, since our PEs have only a few sections.
//
// Loaded layouts are functional supersets of Flat - anything that can be done with Flat
// can be done with Loaded.
//
// Running native code in the PE (i.e. R2R or IJW scenarios) requires Loaded layout.
// It is possible to execute R2R assembly from Flat layout in IL mode, but its R2R functionality
// will be disabled. When R2R is explicitly turned off, Flat is sufficient for any scenario with
// R2R assemblies.
// In the case of IJW, the PE must be loaded by the native loader to ensure that native dependencies
// are resolved.
//
// In some scenarios we create Loaded layouts by manually mapping images into memory.
// That is particularly true on Unix where we cannot rely on OS loader.
// Manual creation of layouts is limited to "IL only" images. This can be checked
// for via `PEDecoder::IsILOnlyImage`
// NOTE: historically, and somewhat confusingly, R2R PEs are considered IsILOnlyImage for this
// purpose. That is true even for composite R2R PEs that do not contain IL.
//
// A PEImage, depending on scenario, may end up creating both Flat and Loaded layouts,
// thus it has two slots - m_pLayouts[IMAGE_COUNT].
//
// m_pLayouts[IMAGE_FLAT]
// When initialized contains a layout that allows operations for which Flat layout is sufficient -
// i.e. reading metadata
//
// m_pLayouts[IMAGE_LOADED]
// When initialized contains a layout that allows loading/running code.
//
// The layouts can only be unloaded together with the owning PEImage, so if we have Flat and
// then need Loaded, we can only add one more. Thus we have two slots.
//
// Often the slots refer to the same layout though. That is because if we create Loaded before Flat,
// we put Loaded into both slots, since it is functionally a superset of Flat.
// Also for pure-IL assemblies Flat is sufficient for anything, so we may put Flat into both slots.
//
#define CV_SIGNATURE_RSDS 0x53445352
// CodeView RSDS debug information -> PDB 7.00
struct CV_INFO_PDB70
{
DWORD magic;
GUID signature; // unique identifier
DWORD age; // an always-incrementing value
char path[MAX_LONGPATH]; // zero terminated string with the name of the PDB file
};
typedef DPTR(class PEImage) PTR_PEImage;
class PEImage final
{
public:
// ------------------------------------------------------------
// Public API
// ------------------------------------------------------------
// initialize static data (i.e. locks, unique instance cache, etc..)
static void Startup();
~PEImage();
PEImage();
BOOL Equals(PEImage* pImage);
ULONG AddRef();
ULONG Release();
#ifndef DACCESS_COMPILE
static PTR_PEImage CreateFromByteArray(const BYTE* array, COUNT_T size);
#ifndef TARGET_UNIX
static PTR_PEImage CreateFromHMODULE(HMODULE hMod);
#endif // !TARGET_UNIX
static PTR_PEImage OpenImage(
LPCWSTR pPath,
MDInternalImportFlags flags = MDInternalImport_Default,
BundleFileLocation bundleFileLocation = BundleFileLocation::Invalid());
static PTR_PEImage FindByPath(LPCWSTR pPath, BOOL isInBundle = TRUE);
void AddToHashMap();
#endif
BOOL IsOpened();
PTR_PEImageLayout GetOrCreateLayout(DWORD imageLayoutMask);
BOOL HasLoadedLayout();
PTR_PEImageLayout GetLoadedLayout();
PTR_PEImageLayout GetFlatLayout();
BOOL HasPath();
ULONG GetPathHash();
const SString& GetPath();
const SString& GetPathToLoad();
LPCWSTR GetPathForErrorMessages() { return GetPath(); }
BOOL IsFile();
BOOL IsInBundle() const;
INT64 GetOffset() const;
INT64 GetSize() const;
INT64 GetUncompressedSize() const;
HANDLE GetFileHandle();
HRESULT TryOpenFile(bool takeLock = false);
void GetMVID(GUID *pMvid);
BOOL HasV1Metadata();
IMDInternalImport* GetMDImport();
BOOL MDImportLoaded();
IMDInternalImport* GetNativeMDImport(BOOL loadAllowed = TRUE);
BOOL HasContents() ;
BOOL IsPtrInImage(PTR_CVOID data);
BOOL HasNTHeaders();
BOOL HasCorHeader();
BOOL HasReadyToRunHeader();
BOOL HasDirectoryEntry(int entry);
BOOL Has32BitNTHeaders();
void GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine);
BOOL IsILOnly();
BOOL IsReferenceAssembly();
BOOL IsComponentAssembly();
PTR_CVOID GetNativeManifestMetadata(COUNT_T* pSize = NULL);
mdToken GetEntryPointToken();
DWORD GetCorHeaderFlags();
PTR_CVOID GetMetadata(COUNT_T* pSize = NULL);
// Check utilities
static CHECK CheckStartup();
static CHECK CheckCanonicalFullPath(const SString& path);
CHECK CheckFormat();
CHECK CheckILFormat();
CHECK CheckUniqueInstance();
void SetModuleFileNameHintForDAC();
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
const SString &GetModuleFileNameHintForDAC();
#endif
private:
#ifndef DACCESS_COMPILE
// Get or create the layout corresponding to the mask, with an AddRef
PTR_PEImageLayout GetOrCreateLayoutInternal(DWORD imageLayoutMask);
// Create the mapped layout
PTR_PEImageLayout CreateLoadedLayout(bool throwOnFailure);
// Create the flat layout
PTR_PEImageLayout CreateFlatLayout();
void SetLayout(DWORD dwLayout, PTR_PEImageLayout pLayout);
#endif
// Get an existing layout corresponding to the mask, no AddRef
PTR_PEImageLayout GetExistingLayoutInternal(DWORD imageLayoutMask);
void OpenMDImport();
void OpenNativeMDImport();
// ------------------------------------------------------------
// Private routines
// ------------------------------------------------------------
void Init(LPCWSTR pPath, BundleFileLocation bundleFileLocation);
struct PEImageLocator
{
LPCWSTR m_pPath;
BOOL m_bIsInBundle;
PEImageLocator(LPCWSTR pPath, BOOL bIsInBundle)
: m_pPath(pPath),
m_bIsInBundle(bIsInBundle)
{
}
PEImageLocator(PEImage * pImage)
: m_pPath(pImage->m_path.GetUnicode())
{
m_bIsInBundle = pImage->IsInBundle();
}
};
static BOOL CompareImage(UPTR image1, UPTR image2);
static BOOL CompareIJWDataBase(UPTR base, UPTR mapping);
void DECLSPEC_NORETURN ThrowFormat(HRESULT hr);
public:
class IJWFixupData
{
private:
Crst m_lock;
void* m_base;
DWORD m_flags;
PTR_LoaderHeap m_DllThunkHeap;
// the fixup for the next iteration in FixupVTables
// we use it to make sure that we do not try to fix up the same entry twice
// if there was a pass that was aborted in the middle
COUNT_T m_iNextFixup;
COUNT_T m_iNextMethod;
enum {
e_FIXED_UP = 0x1
};
public:
IJWFixupData(void* pBase);
~IJWFixupData();
void* GetBase() { LIMITED_METHOD_CONTRACT; return m_base; }
Crst* GetLock() { LIMITED_METHOD_CONTRACT; return &m_lock; }
BOOL IsFixedUp() { LIMITED_METHOD_CONTRACT; return m_flags & e_FIXED_UP; }
void SetIsFixedUp() { LIMITED_METHOD_CONTRACT; m_flags |= e_FIXED_UP; }
PTR_LoaderHeap GetThunkHeap();
void MarkMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod);
BOOL IsMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod);
};
static IJWFixupData* GetIJWData(void* pBase);
static PTR_LoaderHeap GetDllThunkHeap(void* pBase);
static void UnloadIJWModule(void* pBase);
private:
// ------------------------------------------------------------
// Static fields
// ------------------------------------------------------------
static CrstStatic s_hashLock;
static PtrHashMap* s_Images;
//@TODO:workaround: Remove this when we have one PEImage per mapped image,
//@TODO:workaround: and move the lock there
// This is for IJW thunk initialization, as it is no longer guaranteed
// that the initialization will occur under the loader lock.
static CrstStatic s_ijwHashLock;
static PtrHashMap* s_ijwFixupDataHash;
// ------------------------------------------------------------
// Instance fields
// ------------------------------------------------------------
SString m_path;
LONG m_refCount;
// means this is a unique (deduped) instance.
BOOL m_bInHashMap;
// If this image is located within a single-file bundle, the location within the bundle.
// If m_bundleFileLocation is valid, it takes precedence over m_path for loading.
BundleFileLocation m_bundleFileLocation;
// valid handle if we tried to open the file/path and succeeded.
HANDLE m_hFile;
DWORD m_dwPEKind;
DWORD m_dwMachine;
// This variable holds the module name.
// It is only used by DAC to remap fusion loaded modules back to
// disk IL. This really is a workaround. The real fix is for fusion loader
// hook (public API on hosting) to take an additional file name hint.
// We are piggy backing on the fact that module name is the same as file name!!!
SString m_sModuleFileNameHintUsedByDac; // This is only used by DAC
enum
{
IMAGE_FLAT=0,
IMAGE_LOADED=1,
IMAGE_COUNT=2
};
SimpleRWLock *m_pLayoutLock;
PTR_PEImageLayout m_pLayouts[IMAGE_COUNT];
#ifdef METADATATRACKER_DATA
class MetaDataTracker *m_pMDTracker;
#endif // METADATATRACKER_DATA
IMDInternalImport* m_pMDImport;
IMDInternalImport* m_pNativeMDImport;
};
FORCEINLINE void PEImageRelease(PEImage *i)
{
WRAPPER_NO_CONTRACT;
i->Release();
}
typedef Wrapper<PEImage *, DoNothing, PEImageRelease> PEImageHolder;
// ================================================================================
// Inline definitions
// ================================================================================
#include "peimage.inl"
#endif // PEIMAGE_H_
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// --------------------------------------------------------------------------------
// PEImage.h
//
// --------------------------------------------------------------------------------
#ifndef PEIMAGE_H_
#define PEIMAGE_H_
// --------------------------------------------------------------------------------
// Required headers
// --------------------------------------------------------------------------------
#include "clrtypes.h"
#include "peimagelayout.h"
#include "sstring.h"
#include "holder.h"
#include <bundle.h>
class SimpleRWLock;
// --------------------------------------------------------------------------------
// Forward declarations
// --------------------------------------------------------------------------------
class Crst;
// --------------------------------------------------------------------------------
// PEImage is a PE file loaded into memory.
//
// The actual data is represented by PEImageLayout instances which are created on demand.
//
// Various PEImageLayouts can be classified into two kinds -
// - Flat - the same layout as on disk/array or
//
// - Loaded - PE sections are mapped into virtual addresses.
// PE relocations are applied.
// Native exception handlers are registered with OS (on Windows).
//
// Flat layouts are sufficient for operations that do not require running native code,
// Anything based on RVA, such as retrieving IL method bodies, is slightly less efficient,
// since RVA must be translated to file offsets by iterating through section headers.
// The additional cost is not very high though, since our PEs have only a few sections.
//
// Loaded layouts are functional supersets of Flat - anything that can be done with Flat
// can be done with Loaded.
//
// Running native code in the PE (i.e. R2R or IJW scenarios) requires Loaded layout.
// It is possible to execute R2R assembly from Flat layout in IL mode, but its R2R functionality
// will be disabled. When R2R is explicitly turned off, Flat is sufficient for any scenario with
// R2R assemblies.
// In the case of IJW, the PE must be loaded by the native loader to ensure that native dependencies
// are resolved.
//
// In some scenarios we create Loaded layouts by manually mapping images into memory.
// That is particularly true on Unix where we cannot rely on OS loader.
// Manual creation of layouts is limited to "IL only" images. This can be checked
// for via `PEDecoder::IsILOnlyImage`
// NOTE: historically, and somewhat confusingly, R2R PEs are considered IsILOnlyImage for this
// purpose. That is true even for composite R2R PEs that do not contain IL.
//
// A PEImage, depending on scenario, may end up creating both Flat and Loaded layouts,
// thus it has two slots - m_pLayouts[IMAGE_COUNT].
//
// m_pLayouts[IMAGE_FLAT]
// When initialized contains a layout that allows operations for which Flat layout is sufficient -
// i.e. reading metadata
//
// m_pLayouts[IMAGE_LOADED]
// When initialized contains a layout that allows loading/running code.
//
// The layouts can only be unloaded together with the owning PEImage, so if we have Flat and
// then need Loaded, we can only add one more. Thus we have two slots.
//
// Often the slots refer to the same layout though. That is because if we create Loaded before Flat,
// we put Loaded into both slots, since it is functionally a superset of Flat.
// Also for pure-IL assemblies Flat is sufficient for anything, so we may put Flat into both slots.
//
#define CV_SIGNATURE_RSDS 0x53445352
// CodeView RSDS debug information -> PDB 7.00
struct CV_INFO_PDB70
{
DWORD magic;
GUID signature; // unique identifier
DWORD age; // an always-incrementing value
char path[MAX_LONGPATH]; // zero terminated string with the name of the PDB file
};
typedef DPTR(class PEImage) PTR_PEImage;
class PEImage final
{
public:
// ------------------------------------------------------------
// Public API
// ------------------------------------------------------------
// initialize static data (i.e. locks, unique instance cache, etc..)
static void Startup();
~PEImage();
PEImage();
BOOL Equals(PEImage* pImage);
ULONG AddRef();
ULONG Release();
#ifndef DACCESS_COMPILE
static PTR_PEImage CreateFromByteArray(const BYTE* array, COUNT_T size);
#ifndef TARGET_UNIX
static PTR_PEImage CreateFromHMODULE(HMODULE hMod);
#endif // !TARGET_UNIX
static PTR_PEImage OpenImage(
LPCWSTR pPath,
MDInternalImportFlags flags = MDInternalImport_Default,
BundleFileLocation bundleFileLocation = BundleFileLocation::Invalid());
static PTR_PEImage FindByPath(LPCWSTR pPath, BOOL isInBundle = TRUE);
void AddToHashMap();
#endif
BOOL IsOpened();
PTR_PEImageLayout GetOrCreateLayout(DWORD imageLayoutMask);
BOOL HasLoadedLayout();
PTR_PEImageLayout GetLoadedLayout();
PTR_PEImageLayout GetFlatLayout();
BOOL HasPath();
ULONG GetPathHash();
const SString& GetPath();
const SString& GetPathToLoad();
LPCWSTR GetPathForErrorMessages() { return GetPath(); }
BOOL IsFile();
BOOL IsInBundle() const;
INT64 GetOffset() const;
INT64 GetSize() const;
INT64 GetUncompressedSize() const;
HANDLE GetFileHandle();
HRESULT TryOpenFile(bool takeLock = false);
void GetMVID(GUID *pMvid);
BOOL HasV1Metadata();
IMDInternalImport* GetMDImport();
BOOL MDImportLoaded();
IMDInternalImport* GetNativeMDImport(BOOL loadAllowed = TRUE);
BOOL HasContents() ;
BOOL IsPtrInImage(PTR_CVOID data);
BOOL HasNTHeaders();
BOOL HasCorHeader();
BOOL HasReadyToRunHeader();
BOOL HasDirectoryEntry(int entry);
BOOL Has32BitNTHeaders();
void GetPEKindAndMachine(DWORD* pdwKind, DWORD* pdwMachine);
BOOL IsILOnly();
BOOL IsReferenceAssembly();
BOOL IsComponentAssembly();
PTR_CVOID GetNativeManifestMetadata(COUNT_T* pSize = NULL);
mdToken GetEntryPointToken();
DWORD GetCorHeaderFlags();
PTR_CVOID GetMetadata(COUNT_T* pSize = NULL);
// Check utilities
static CHECK CheckStartup();
static CHECK CheckCanonicalFullPath(const SString& path);
CHECK CheckFormat();
CHECK CheckILFormat();
CHECK CheckUniqueInstance();
void SetModuleFileNameHintForDAC();
#ifdef DACCESS_COMPILE
void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
const SString &GetModuleFileNameHintForDAC();
#endif
private:
#ifndef DACCESS_COMPILE
// Get or create the layout corresponding to the mask, with an AddRef
PTR_PEImageLayout GetOrCreateLayoutInternal(DWORD imageLayoutMask);
// Create the mapped layout
PTR_PEImageLayout CreateLoadedLayout(bool throwOnFailure);
// Create the flat layout
PTR_PEImageLayout CreateFlatLayout();
void SetLayout(DWORD dwLayout, PTR_PEImageLayout pLayout);
#endif
// Get an existing layout corresponding to the mask, no AddRef
PTR_PEImageLayout GetExistingLayoutInternal(DWORD imageLayoutMask);
void OpenMDImport();
void OpenNativeMDImport();
// ------------------------------------------------------------
// Private routines
// ------------------------------------------------------------
void Init(LPCWSTR pPath, BundleFileLocation bundleFileLocation);
struct PEImageLocator
{
LPCWSTR m_pPath;
BOOL m_bIsInBundle;
PEImageLocator(LPCWSTR pPath, BOOL bIsInBundle)
: m_pPath(pPath),
m_bIsInBundle(bIsInBundle)
{
}
PEImageLocator(PEImage * pImage)
: m_pPath(pImage->m_path.GetUnicode())
{
m_bIsInBundle = pImage->IsInBundle();
}
};
static BOOL CompareImage(UPTR image1, UPTR image2);
static BOOL CompareIJWDataBase(UPTR base, UPTR mapping);
void DECLSPEC_NORETURN ThrowFormat(HRESULT hr);
public:
class IJWFixupData
{
private:
Crst m_lock;
void* m_base;
DWORD m_flags;
PTR_LoaderHeap m_DllThunkHeap;
// the fixup for the next iteration in FixupVTables
// we use it to make sure that we do not try to fix up the same entry twice
// if there was a pass that was aborted in the middle
COUNT_T m_iNextFixup;
COUNT_T m_iNextMethod;
enum {
e_FIXED_UP = 0x1
};
public:
IJWFixupData(void* pBase);
~IJWFixupData();
void* GetBase() { LIMITED_METHOD_CONTRACT; return m_base; }
Crst* GetLock() { LIMITED_METHOD_CONTRACT; return &m_lock; }
BOOL IsFixedUp() { LIMITED_METHOD_CONTRACT; return m_flags & e_FIXED_UP; }
void SetIsFixedUp() { LIMITED_METHOD_CONTRACT; m_flags |= e_FIXED_UP; }
PTR_LoaderHeap GetThunkHeap();
void MarkMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod);
BOOL IsMethodFixedUp(COUNT_T iFixup, COUNT_T iMethod);
};
static IJWFixupData* GetIJWData(void* pBase);
static PTR_LoaderHeap GetDllThunkHeap(void* pBase);
static void UnloadIJWModule(void* pBase);
private:
// ------------------------------------------------------------
// Static fields
// ------------------------------------------------------------
static CrstStatic s_hashLock;
static PtrHashMap* s_Images;
//@TODO:workaround: Remove this when we have one PEImage per mapped image,
//@TODO:workaround: and move the lock there
// This is for IJW thunk initialization, as it is no longer guaranteed
// that the initialization will occur under the loader lock.
static CrstStatic s_ijwHashLock;
static PtrHashMap* s_ijwFixupDataHash;
// ------------------------------------------------------------
// Instance fields
// ------------------------------------------------------------
SString m_path;
LONG m_refCount;
// means this is a unique (deduped) instance.
BOOL m_bInHashMap;
// If this image is located within a single-file bundle, the location within the bundle.
// If m_bundleFileLocation is valid, it takes precedence over m_path for loading.
BundleFileLocation m_bundleFileLocation;
// valid handle if we tried to open the file/path and succeeded.
HANDLE m_hFile;
DWORD m_dwPEKind;
DWORD m_dwMachine;
// This variable holds the module name.
// It is only used by DAC to remap fusion loaded modules back to
// disk IL. This really is a workaround. The real fix is for fusion loader
// hook (public API on hosting) to take an additional file name hint.
// We are piggy backing on the fact that module name is the same as file name!!!
SString m_sModuleFileNameHintUsedByDac; // This is only used by DAC
enum
{
IMAGE_FLAT=0,
IMAGE_LOADED=1,
IMAGE_COUNT=2
};
SimpleRWLock *m_pLayoutLock;
PTR_PEImageLayout m_pLayouts[IMAGE_COUNT];
#ifdef METADATATRACKER_DATA
class MetaDataTracker *m_pMDTracker;
#endif // METADATATRACKER_DATA
IMDInternalImport* m_pMDImport;
IMDInternalImport* m_pNativeMDImport;
};
FORCEINLINE void PEImageRelease(PEImage *i)
{
WRAPPER_NO_CONTRACT;
i->Release();
}
typedef Wrapper<PEImage *, DoNothing, PEImageRelease> PEImageHolder;
// ================================================================================
// Inline definitions
// ================================================================================
#include "peimage.inl"
#endif // PEIMAGE_H_
| -1 |
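
The block comment at the top of `peimage.h` above explains the two layout slots (`IMAGE_FLAT`, `IMAGE_LOADED`) and the rule that a Loaded layout, being a functional superset, may back both slots. Here is a minimal standalone sketch of just that caching rule; `LayoutCache` and `Layout` are invented names and `std::mutex` stands in for the runtime's `SimpleRWLock`, so treat it as an illustration of the documented design, not the actual `PEImage::GetOrCreateLayout` implementation.

```cpp
#include <array>
#include <memory>
#include <mutex>
#include <string>

struct Layout {
    std::string kind;  // "flat" or "loaded"
};

class LayoutCache {
public:
    enum Slot { IMAGE_FLAT = 0, IMAGE_LOADED = 1, IMAGE_COUNT = 2 };

    // Return a layout that can serve the requested slot, creating it on demand.
    std::shared_ptr<Layout> GetOrCreate(Slot slot)
    {
        std::lock_guard<std::mutex> guard(m_lock);
        if (m_layouts[slot] != nullptr)
            return m_layouts[slot];

        if (slot == IMAGE_LOADED) {
            // A loaded layout is a functional superset of a flat one, so it can
            // back both slots (mirrors the comment in peimage.h).
            auto loaded = std::make_shared<Layout>();
            loaded->kind = "loaded";
            m_layouts[IMAGE_LOADED] = loaded;
            if (m_layouts[IMAGE_FLAT] == nullptr)
                m_layouts[IMAGE_FLAT] = loaded;
            return loaded;
        }

        // Flat is enough for metadata-only work.
        auto flat = std::make_shared<Layout>();
        flat->kind = "flat";
        m_layouts[IMAGE_FLAT] = flat;
        return flat;
    }

private:
    std::mutex m_lock;
    std::array<std::shared_ptr<Layout>, IMAGE_COUNT> m_layouts{};
};

int main()
{
    LayoutCache cache;
    auto loaded = cache.GetOrCreate(LayoutCache::IMAGE_LOADED);
    auto flat = cache.GetOrCreate(LayoutCache::IMAGE_FLAT);
    return loaded == flat ? 0 : 1;  // same object: the loaded layout filled both slots
}
```

A Flat request made after a Loaded one returns the same object, which is exactly the "often the slots refer to the same layout" case the comment describes.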
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/mono/mono/mini/interp/interp-intrins.h | #ifndef __MONO_MINI_INTERP_INTRINSICS_H__
#define __MONO_MINI_INTERP_INTRINSICS_H__
#include <glib.h>
#include <mono/metadata/object.h>
#include "interp-internals.h"
void
interp_intrins_marvin_block (guint32 *pp0, guint32 *pp1);
guint32
interp_intrins_ascii_chars_to_uppercase (guint32 val);
int
interp_intrins_ordinal_ignore_case_ascii (guint32 valueA, guint32 valueB);
int
interp_intrins_64ordinal_ignore_case_ascii (guint64 valueA, guint64 valueB);
MonoString*
interp_intrins_u32_to_decstr (guint32 value, MonoArray *cache, MonoVTable *vtable);
mono_u
interp_intrins_widen_ascii_to_utf16 (guint8 *pAsciiBuffer, mono_unichar2 *pUtf16Buffer, mono_u elementCount);
#endif /* __MONO_MINI_INTERP_INTRINSICS_H__ */
| #ifndef __MONO_MINI_INTERP_INTRINSICS_H__
#define __MONO_MINI_INTERP_INTRINSICS_H__
#include <glib.h>
#include <mono/metadata/object.h>
#include "interp-internals.h"
void
interp_intrins_marvin_block (guint32 *pp0, guint32 *pp1);
guint32
interp_intrins_ascii_chars_to_uppercase (guint32 val);
int
interp_intrins_ordinal_ignore_case_ascii (guint32 valueA, guint32 valueB);
int
interp_intrins_64ordinal_ignore_case_ascii (guint64 valueA, guint64 valueB);
MonoString*
interp_intrins_u32_to_decstr (guint32 value, MonoArray *cache, MonoVTable *vtable);
mono_u
interp_intrins_widen_ascii_to_utf16 (guint8 *pAsciiBuffer, mono_unichar2 *pUtf16Buffer, mono_u elementCount);
#endif /* __MONO_MINI_INTERP_INTRINSICS_H__ */
| -1 |
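
`interp-intrins.h` declares helpers such as `interp_intrins_ascii_chars_to_uppercase (guint32 val)` that operate on several characters packed into one 32-bit value. The snippet below shows a generic branch-free (SWAR) way to uppercase packed ASCII data; it assumes four ASCII bytes per 32-bit word purely for illustration and is not the Mono implementation, whose packing and exact bit tricks may differ.

```cpp
#include <cstdint>
#include <cstdio>

// Branch-free uppercase of four ASCII bytes packed into one 32-bit word.
// Bytes outside 'a'..'z' (including non-ASCII bytes) pass through unchanged.
uint32_t AsciiToUpperPacked(uint32_t v)
{
    uint32_t t = v & 0x7F7F7F7Fu;                    // clear high bits so per-byte adds cannot carry
    uint32_t geA = (t + 0x1F1F1F1Fu) & 0x80808080u;  // high bit set where byte >= 'a' (0x61)
    uint32_t gtZ = (t + 0x05050505u) & 0x80808080u;  // high bit set where byte >  'z' (>= 0x7B)
    uint32_t isLower = geA & ~gtZ & ~v;              // ~v keeps non-ASCII bytes (high bit set) out
    return v ^ (isLower >> 2);                       // 0x80 >> 2 == 0x20, the ASCII case bit
}

int main()
{
    uint32_t packed = 0x7A624161u;  // bytes 'a','A','b','z' in a little-endian view
    std::printf("%08X -> %08X\n", packed, AsciiToUpperPacked(packed));  // expect 5A424141
    return 0;
}
```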
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/inc/corhlpr.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*****************************************************************************
** **
** Corhlpr.h - **
** **
*****************************************************************************/
#ifndef __CORHLPR_H__
#define __CORHLPR_H__
#if defined(_MSC_VER) && defined(HOST_X86) && !defined(FPO_ON)
#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
#define FPO_ON 1
#define CORHLPR_TURNED_FPO_ON 1
#endif
#include "cor.h"
#include "corhdr.h"
#include "corerror.h"
#include "unreachable.h"
// This header is consumed both within the runtime and externally. In the former
// case we need to wrap memory allocations, in the latter there is no
// infrastructure to support this. Detect which way we're building and provide a
// very simple abstraction layer (handles allocating bytes only).
#ifdef _BLD_CLR
#include "new.hpp"
#define NEW_NOTHROW(_bytes) new (nothrow) BYTE[_bytes]
#define NEW_THROWS(_bytes) new BYTE[_bytes]
void DECLSPEC_NORETURN ThrowOutOfMemory();
inline void DECLSPEC_NORETURN THROW_OUT_OF_MEMORY()
{
ThrowOutOfMemory();
}
#else
#define NEW_NOTHROW(_bytes) new BYTE[_bytes]
#define NEW_THROWS(_bytes) __CorHlprNewThrows(_bytes)
static inline void DECLSPEC_NORETURN __CorHlprThrowOOM()
{
RaiseException(STATUS_NO_MEMORY, 0, 0, NULL);
__UNREACHABLE();
}
static inline BYTE *__CorHlprNewThrows(size_t bytes)
{
BYTE *pbMemory = new BYTE[bytes];
if (pbMemory == NULL)
__CorHlprThrowOOM();
return pbMemory;
}
inline void DECLSPEC_NORETURN THROW_OUT_OF_MEMORY()
{
__CorHlprThrowOOM();
}
#endif
//*****************************************************************************
// There is a set of macros commonly used in the helpers which you will want
// to override to get richer behavior. The following defines what is needed
// if you choose not to do the extra work.
//*****************************************************************************
#ifndef IfFailGoto
#define IfFailGoto(EXPR, LABEL) \
do { hr = (EXPR); if(FAILED(hr)) { goto LABEL; } } while (0)
#endif
#ifndef IfFailGo
#define IfFailGo(EXPR) IfFailGoto(EXPR, ErrExit)
#endif
#ifndef IfFailRet
#define IfFailRet(EXPR) do { hr = (EXPR); if(FAILED(hr)) { return (hr); } } while (0)
#endif
#ifndef IfNullRet
#define IfNullRet(EXPR) do { if ((EXPR) == NULL){ return (E_OUTOFMEMORY); } } while (0)
#endif
#ifndef _ASSERTE
#define _ASSERTE(expr)
#endif
#if !BIGENDIAN
#define VAL16(x) x
#define VAL32(x) x
#endif
//*****************************************************************************
//
//***** Macro to assist with cleaning up local static variables
//
//*****************************************************************************
#define CHECK_LOCAL_STATIC_VAR(x) \
x \
//*****************************************************************************
//
//***** Utility helpers
//
//*****************************************************************************
#define MAX_CLASSNAME_LENGTH 1024
//*****************************************************************************
//
//***** Signature helpers
//
//*****************************************************************************
inline bool isCallConv(unsigned sigByte, CorCallingConvention conv)
{
return ((sigByte & IMAGE_CEE_CS_CALLCONV_MASK) == (unsigned) conv);
}
//*****************************************************************************
//
//***** File format helper classes
//
//*****************************************************************************
//*****************************************************************************
typedef struct tagCOR_ILMETHOD_SECT_SMALL : IMAGE_COR_ILMETHOD_SECT_SMALL {
//Data follows
const BYTE* Data() const
{
return(((const BYTE*) this) + sizeof(struct tagCOR_ILMETHOD_SECT_SMALL));
}
bool IsSmall() const
{
return (Kind & CorILMethod_Sect_FatFormat) == 0;
}
bool More() const
{
return (Kind & CorILMethod_Sect_MoreSects) != 0;
}
} COR_ILMETHOD_SECT_SMALL;
/************************************/
/* NOTE this structure must be DWORD aligned!! */
typedef struct tagCOR_ILMETHOD_SECT_FAT : IMAGE_COR_ILMETHOD_SECT_FAT {
//Data follows
const BYTE* Data() const
{
return(((const BYTE*) this) + sizeof(struct tagCOR_ILMETHOD_SECT_FAT));
}
//Endian-safe wrappers
unsigned GetKind() const {
/* return Kind; */
return *(BYTE*)this;
}
void SetKind(unsigned kind) {
/* Kind = kind; */
*(BYTE*)this = (BYTE)kind;
}
unsigned GetDataSize() const {
/* return DataSize; */
BYTE* p = (BYTE*)this;
return ((unsigned)*(p+1)) |
(((unsigned)*(p+2)) << 8) |
(((unsigned)*(p+3)) << 16);
}
void SetDataSize(unsigned datasize) {
/* DataSize = dataSize; */
BYTE* p = (BYTE*)this;
*(p+1) = (BYTE)(datasize);
*(p+2) = (BYTE)(datasize >> 8);
*(p+3) = (BYTE)(datasize >> 16);
}
} COR_ILMETHOD_SECT_FAT;
typedef struct tagCOR_ILMETHOD_SECT_EH_CLAUSE_FAT : public IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT {
//Endian-safe wrappers
CorExceptionFlag GetFlags() const {
return (CorExceptionFlag)VAL32((unsigned)Flags);
}
void SetFlags(CorExceptionFlag flags) {
Flags = (CorExceptionFlag)VAL32((unsigned)flags);
}
DWORD GetTryOffset() const {
return VAL32(TryOffset);
}
void SetTryOffset(DWORD Offset) {
TryOffset = VAL32(Offset);
}
DWORD GetTryLength() const {
return VAL32(TryLength);
}
void SetTryLength(DWORD Length) {
TryLength = VAL32(Length);
}
DWORD GetHandlerOffset() const {
return VAL32(HandlerOffset);
}
void SetHandlerOffset(DWORD Offset) {
HandlerOffset = VAL32(Offset);
}
DWORD GetHandlerLength() const {
return VAL32(HandlerLength);
}
void SetHandlerLength(DWORD Length) {
HandlerLength = VAL32(Length);
}
DWORD GetClassToken() const {
return VAL32(ClassToken);
}
void SetClassToken(DWORD tok) {
ClassToken = VAL32(tok);
}
DWORD GetFilterOffset() const {
return VAL32(FilterOffset);
}
void SetFilterOffset(DWORD offset) {
FilterOffset = VAL32(offset);
}
} COR_ILMETHOD_SECT_EH_CLAUSE_FAT;
//*****************************************************************************
struct COR_ILMETHOD_SECT_EH_FAT : public COR_ILMETHOD_SECT_FAT {
static unsigned Size(unsigned ehCount) {
return (sizeof(COR_ILMETHOD_SECT_EH_FAT) +
sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT) * (ehCount-1));
}
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT Clauses[1]; // actually variable size
};
typedef struct tagCOR_ILMETHOD_SECT_EH_CLAUSE_SMALL : public IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL {
//Endian-safe wrappers
CorExceptionFlag GetFlags() const {
return (CorExceptionFlag)VAL16((SHORT)Flags);
}
void SetFlags(CorExceptionFlag flags) {
Flags = (CorExceptionFlag)VAL16((SHORT)flags);
}
DWORD GetTryOffset() const {
return VAL16(TryOffset);
}
void SetTryOffset(DWORD Offset) {
_ASSERTE((Offset & ~0xffff) == 0);
TryOffset = VAL16(Offset);
}
DWORD GetTryLength() const {
return TryLength;
}
void SetTryLength(DWORD Length) {
_ASSERTE((Length & ~0xff) == 0);
TryLength = Length;
}
DWORD GetHandlerOffset() const {
return VAL16(HandlerOffset);
}
void SetHandlerOffset(DWORD Offset) {
_ASSERTE((Offset & ~0xffff) == 0);
HandlerOffset = VAL16(Offset);
}
DWORD GetHandlerLength() const {
return HandlerLength;
}
void SetHandlerLength(DWORD Length) {
_ASSERTE((Length & ~0xff) == 0);
HandlerLength = Length;
}
DWORD GetClassToken() const {
return VAL32(ClassToken);
}
void SetClassToken(DWORD tok) {
ClassToken = VAL32(tok);
}
DWORD GetFilterOffset() const {
return VAL32(FilterOffset);
}
void SetFilterOffset(DWORD offset) {
FilterOffset = VAL32(offset);
}
} COR_ILMETHOD_SECT_EH_CLAUSE_SMALL;
//*****************************************************************************
struct COR_ILMETHOD_SECT_EH_SMALL : public COR_ILMETHOD_SECT_SMALL {
static unsigned Size(unsigned ehCount) {
return (sizeof(COR_ILMETHOD_SECT_EH_SMALL) +
sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL) * (ehCount-1));
}
WORD Reserved; // alignment padding
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL Clauses[1]; // actually variable size
};
/************************************/
/* NOTE this structure must be DWORD aligned!! */
struct COR_ILMETHOD_SECT
{
bool More() const
{
return((AsSmall()->Kind & CorILMethod_Sect_MoreSects) != 0);
}
CorILMethodSect Kind() const
{
return((CorILMethodSect) (AsSmall()->Kind & CorILMethod_Sect_KindMask));
}
const COR_ILMETHOD_SECT* Next() const
{
if (!More()) return(0);
return ((COR_ILMETHOD_SECT*)Align(((BYTE *)this) + DataSize()));
}
const BYTE* Data() const
{
if (IsFat()) return(AsFat()->Data());
return(AsSmall()->Data());
}
unsigned DataSize() const
{
if (Kind() == CorILMethod_Sect_EHTable)
{
// VB and MC++ shipped with bug where they have not accounted for size of COR_ILMETHOD_SECT_EH_XXX
// in DataSize. To avoid breaking these images, we will align the size of EH sections up. This works
// because IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_XXX is bigger than COR_ILMETHOD_SECT_EH_XXX
// (see VSWhidbey #99031 and related bugs for details).
if (IsFat())
return Fat.Size(Fat.GetDataSize() / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT));
else
return Small.Size(Small.DataSize / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL));
}
else
{
if (IsFat()) return(AsFat()->GetDataSize());
return(AsSmall()->DataSize);
}
}
friend struct COR_ILMETHOD;
friend struct tagCOR_ILMETHOD_FAT;
friend struct tagCOR_ILMETHOD_TINY;
bool IsFat() const
{
return((AsSmall()->Kind & CorILMethod_Sect_FatFormat) != 0);
}
static const void* Align(const void* p)
{
return((void*) ((((UINT_PTR) p) + 3) & ~3));
}
protected:
const COR_ILMETHOD_SECT_FAT* AsFat() const
{
return((COR_ILMETHOD_SECT_FAT*) this);
}
const COR_ILMETHOD_SECT_SMALL* AsSmall() const
{
return((COR_ILMETHOD_SECT_SMALL*) this);
}
public:
// The body is either a COR_ILMETHOD_SECT_SMALL or COR_ILMETHOD_SECT_FAT
// (as indicated by the CorILMethod_Sect_FatFormat bit)
union {
COR_ILMETHOD_SECT_EH_SMALL Small;
COR_ILMETHOD_SECT_EH_FAT Fat;
};
};
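// Illustrative usage sketch (not part of the original header, added for this document):
// the accessors above are enough to walk every extra data section attached to a method.
// Next() already returns 0 once the CorILMethod_Sect_MoreSects bit is clear, so a plain
// loop terminates on its own. 'FindSectionOfKind' is an invented helper name.
inline const COR_ILMETHOD_SECT* FindSectionOfKind(const COR_ILMETHOD_SECT* sect, CorILMethodSect kind)
{
    for (; sect != 0; sect = sect->Next())
    {
        if (sect->Kind() == kind)   // Kind() masks off the fat/more-sections bits
            return sect;
    }
    return 0;
}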
/***********************************/
// exported functions (implementation in Format\Format.cpp):
extern "C" {
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* __stdcall SectEH_EHClause(void *pSectEH, unsigned idx, IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* buff);
// compute the size of the section (best format)
// codeSize is the size of the method
// deprecated
unsigned __stdcall SectEH_SizeWithCode(unsigned ehCount, unsigned codeSize);
// will return worst-case size and then Emit will return the actual size
unsigned __stdcall SectEH_SizeWorst(unsigned ehCount);
// will return exact size which will match the size returned by Emit
unsigned __stdcall SectEH_SizeExact(unsigned ehCount, IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses);
// emit the section (best format);
unsigned __stdcall SectEH_Emit(unsigned size, unsigned ehCount,
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses,
BOOL moreSections, BYTE* outBuff,
ULONG* ehTypeOffsets = 0);
} // extern "C"
struct COR_ILMETHOD_SECT_EH : public COR_ILMETHOD_SECT
{
unsigned EHCount() const
{
return (unsigned)(IsFat() ? (Fat.GetDataSize() / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT)) :
(Small.DataSize / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL)));
}
// return one clause in its fat form. Use 'buff' if needed
const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* EHClause(unsigned idx, IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* buff) const
{
return SectEH_EHClause((void *)this, idx, buff);
};
// compute the size of the section (best format)
// codeSize is the size of the method
// deprecated
unsigned static Size(unsigned ehCount, unsigned codeSize)
{
return SectEH_SizeWithCode(ehCount, codeSize);
};
// will return worst-case size and then Emit will return the actual size
unsigned static Size(unsigned ehCount)
{
return SectEH_SizeWorst(ehCount);
};
// will return exact size which will match the size returned by Emit
unsigned static Size(unsigned ehCount, const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses)
{
return SectEH_SizeExact(ehCount, (IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)clauses);
};
// emit the section (best format);
unsigned static Emit(unsigned size, unsigned ehCount,
const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses,
bool moreSections, BYTE* outBuff,
ULONG* ehTypeOffsets = 0)
{
return SectEH_Emit(size, ehCount,
(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)clauses,
moreSections, outBuff, ehTypeOffsets);
};
};
/***************************************************************************/
/* Used when the method is tiny (< 64 bytes), and there are no local vars */
typedef struct tagCOR_ILMETHOD_TINY : IMAGE_COR_ILMETHOD_TINY
{
bool IsTiny() const
{
return((Flags_CodeSize & (CorILMethod_FormatMask >> 1)) == CorILMethod_TinyFormat);
}
unsigned GetCodeSize() const
{
return(((unsigned) Flags_CodeSize) >> (CorILMethod_FormatShift-1));
}
unsigned GetMaxStack() const
{
return(8);
}
BYTE* GetCode() const
{
return(((BYTE*) this) + sizeof(struct tagCOR_ILMETHOD_TINY));
}
DWORD GetLocalVarSigTok() const
{
return(0);
}
COR_ILMETHOD_SECT* GetSect() const
{
return(0);
}
} COR_ILMETHOD_TINY;
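// Illustrative sketch (not part of the original header, added for this document):
// the tiny format packs everything into one byte - the low 2 bits hold the format tag
// (CorILMethod_TinyFormat) and the remaining 6 bits hold the code size, which is why
// GetCodeSize() above shifts by CorILMethod_FormatShift-1. A method only qualifies for
// the tiny header when its code is under 64 bytes with no locals, no EH and MaxStack <= 8.
// 'EncodeTinyILHeader' is an invented helper name used only for this example.
inline BYTE EncodeTinyILHeader(unsigned codeSize)
{
    // caller must guarantee codeSize < 64; anything larger needs the fat header
    return (BYTE)((codeSize << (CorILMethod_FormatShift - 1)) | CorILMethod_TinyFormat);
}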
/************************************/
// This structure is the 'fat' layout, where no compression is attempted.
// Note that this structure can be added on at the end, thus making it extensible
typedef struct tagCOR_ILMETHOD_FAT : IMAGE_COR_ILMETHOD_FAT
{
//Endian-safe wrappers
unsigned GetSize() const {
/* return Size; */
BYTE* p = (BYTE*)this;
return *(p+1) >> 4;
}
void SetSize(unsigned size) {
/* Size = size; */
BYTE* p = (BYTE*)this;
*(p+1) = (BYTE)((*(p+1) & 0x0F) | (size << 4));
}
unsigned GetFlags() const {
/* return Flags; */
BYTE* p = (BYTE*)this;
return ((unsigned)*(p+0)) | (( ((unsigned)*(p+1)) & 0x0F) << 8);
}
void SetFlags(unsigned flags) {
/* flags = Flags; */
BYTE* p = (BYTE*)this;
*p = (BYTE)flags;
*(p+1) = (BYTE)((*(p+1) & 0xF0) | ((flags >> 8) & 0x0F));
}
bool IsFat() const {
/* return((IMAGE_COR_ILMETHOD_FAT::GetFlags() & CorILMethod_FormatMask) == CorILMethod_FatFormat); */
return (*(BYTE*)this & CorILMethod_FormatMask) == CorILMethod_FatFormat;
}
unsigned GetMaxStack() const {
/* return MaxStack; */
return VAL16(*(USHORT*)((BYTE*)this+2));
}
void SetMaxStack(unsigned maxStack) {
/* MaxStack = maxStack; */
*(USHORT*)((BYTE*)this+2) = VAL16((USHORT)maxStack);
}
unsigned GetCodeSize() const
{
return VAL32(CodeSize);
}
void SetCodeSize(DWORD Size)
{
CodeSize = VAL32(Size);
}
mdToken GetLocalVarSigTok() const
{
return VAL32(LocalVarSigTok);
}
void SetLocalVarSigTok(mdSignature tok)
{
LocalVarSigTok = VAL32(tok);
}
BYTE* GetCode() const {
return(((BYTE*) this) + 4*GetSize());
}
bool More() const {
// return (GetFlags() & CorILMethod_MoreSects) != 0;
return (*(BYTE*)this & CorILMethod_MoreSects) != 0;
}
const COR_ILMETHOD_SECT* GetSect() const {
if (!More()) return (0);
return(((COR_ILMETHOD_SECT*) COR_ILMETHOD_SECT::Align(GetCode() + GetCodeSize())));
}
} COR_ILMETHOD_FAT;
extern "C" {
/************************************/
// exported functions (impl. Format\Format.cpp)
unsigned __stdcall IlmethodSize(COR_ILMETHOD_FAT* header, BOOL MoreSections);
// emit the header (bestFormat) return amount emitted
unsigned __stdcall IlmethodEmit(unsigned size, COR_ILMETHOD_FAT* header,
BOOL moreSections, BYTE* outBuff);
}
struct COR_ILMETHOD
{
// a COR_ILMETHOD header should not be decoded by hand. Instead use
// COR_ILMETHOD_DECODER to decode it.
friend class COR_ILMETHOD_DECODER;
// compute the size of the header (best format)
unsigned static Size(const COR_ILMETHOD_FAT* header, bool MoreSections)
{
return IlmethodSize((COR_ILMETHOD_FAT*)header,MoreSections);
};
// emit the header (bestFormat) return amount emitted
unsigned static Emit(unsigned size, const COR_ILMETHOD_FAT* header,
bool moreSections, BYTE* outBuff)
{
return IlmethodEmit(size, (COR_ILMETHOD_FAT*)header, moreSections, outBuff);
};
//private:
union
{
COR_ILMETHOD_TINY Tiny;
COR_ILMETHOD_FAT Fat;
};
// Code follows the Header, then immediately after the code comes
// any sections (COR_ILMETHOD_SECT).
};
extern "C" {
/***************************************************************************/
/* COR_ILMETHOD_DECODER is the only way functions internal to the EE should
fetch data from a COR_ILMETHOD. This way any dependency on the file format
(and the multiple ways of encoding the header) is centralized in the
COR_ILMETHOD_DECODER constructor. */
void __stdcall DecoderInit(void * pThis, COR_ILMETHOD* header);
int __stdcall DecoderGetOnDiskSize(void * pThis, COR_ILMETHOD* header);
} // extern "C"
class COR_ILMETHOD_DECODER : public COR_ILMETHOD_FAT
{
public:
// This returns an uninitialized decoder, suitable for placement new but nothing
// else. Use with caution.
COR_ILMETHOD_DECODER() {}
// Typically the ONLY way you should access COR_ILMETHOD is through
// this constructor so format changes are easier.
COR_ILMETHOD_DECODER(const COR_ILMETHOD* header)
{
DecoderInit(this,(COR_ILMETHOD*)header);
};
// The above variant of the constructor cannot do a 'complete' job, because
// it cannot look up the local variable signature metadata token.
// This method should be used when you have access to the metadata API.
// If the construction fails, the 'Code' field is set to 0.
enum DecoderStatus {SUCCESS, FORMAT_ERROR, VERIFICATION_ERROR};
// If we want the decoder to verify that the local signature is OK, we
// will pass a non-NULL value for wbStatus.
//
// When using LazyInit we want to ask that the local signature be verified,
// but if we fail verification we still need access to the 'Code' field,
// because we may be able to demand SkipVerification and thus it was OK
// to have had a verification error.
COR_ILMETHOD_DECODER(COR_ILMETHOD* header,
void *pInternalImport,
DecoderStatus* wbStatus);
unsigned EHCount() const
{
return (EH != 0) ? EH->EHCount() : 0;
}
unsigned GetHeaderSize() const
{
return GetCodeSize() + ((EH != 0) ? EH->DataSize() : 0);
}
// returns total size of method for use in copying
int GetOnDiskSize(const COR_ILMETHOD* header)
{
return DecoderGetOnDiskSize(this,(COR_ILMETHOD*)header);
}
// Flags these are available because we inherit COR_ILMETHOD_FAT
// MaxStack
// CodeSize
const BYTE * Code;
PCCOR_SIGNATURE LocalVarSig; // pointer to signature blob, or 0 if none
DWORD cbLocalVarSig; // size of signature blob, or 0 if none
const COR_ILMETHOD_SECT_EH * EH; // eh table if any 0 if none
const COR_ILMETHOD_SECT * Sect; // additional sections 0 if none
}; // class COR_ILMETHOD_DECODER
#if defined(CORHLPR_TURNED_FPO_ON)
#pragma optimize("", on) // Go back to command line default optimizations
#undef CORHLPR_TURNED_FPO_ON
#undef FPO_ON
#endif
#endif // __CORHLPR_H__
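// Illustrative usage sketch (not part of the original header, added for this document):
// typical consumption of COR_ILMETHOD_DECODER - construct it over a raw COR_ILMETHOD and
// read the decoded fields instead of parsing the tiny/fat formats by hand.
// 'DumpMethodBody' is an invented helper name used only for this example.
//
// void DumpMethodBody(const COR_ILMETHOD* pHeader)
// {
//     COR_ILMETHOD_DECODER decoder(pHeader);
//     const BYTE* pCode   = decoder.Code;            // first IL opcode
//     unsigned    codeLen = decoder.GetCodeSize();   // IL code size in bytes
//     unsigned    ehCount = decoder.EHCount();       // 0 when there is no EH section
//     // decoder.LocalVarSig / decoder.cbLocalVarSig describe the locals signature blob, if any.
// }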
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*****************************************************************************
** **
** Corhlpr.h - **
** **
*****************************************************************************/
#ifndef __CORHLPR_H__
#define __CORHLPR_H__
#if defined(_MSC_VER) && defined(HOST_X86) && !defined(FPO_ON)
#pragma optimize("y", on) // Small critical routines, don't put in EBP frame
#define FPO_ON 1
#define CORHLPR_TURNED_FPO_ON 1
#endif
#include "cor.h"
#include "corhdr.h"
#include "corerror.h"
#include "unreachable.h"
// This header is consumed both within the runtime and externally. In the former
// case we need to wrap memory allocations, in the latter there is no
// infrastructure to support this. Detect which way we're building and provide a
// very simple abstraction layer (handles allocating bytes only).
#ifdef _BLD_CLR
#include "new.hpp"
#define NEW_NOTHROW(_bytes) new (nothrow) BYTE[_bytes]
#define NEW_THROWS(_bytes) new BYTE[_bytes]
void DECLSPEC_NORETURN ThrowOutOfMemory();
inline void DECLSPEC_NORETURN THROW_OUT_OF_MEMORY()
{
ThrowOutOfMemory();
}
#else
#define NEW_NOTHROW(_bytes) new BYTE[_bytes]
#define NEW_THROWS(_bytes) __CorHlprNewThrows(_bytes)
static inline void DECLSPEC_NORETURN __CorHlprThrowOOM()
{
RaiseException(STATUS_NO_MEMORY, 0, 0, NULL);
__UNREACHABLE();
}
static inline BYTE *__CorHlprNewThrows(size_t bytes)
{
BYTE *pbMemory = new BYTE[bytes];
if (pbMemory == NULL)
__CorHlprThrowOOM();
return pbMemory;
}
inline void DECLSPEC_NORETURN THROW_OUT_OF_MEMORY()
{
__CorHlprThrowOOM();
}
#endif
//*****************************************************************************
// There is a set of macros commonly used in the helpers which you will want
// to override to get richer behavior. The following defines what is needed
// if you choose not to do the extra work.
//*****************************************************************************
#ifndef IfFailGoto
#define IfFailGoto(EXPR, LABEL) \
do { hr = (EXPR); if(FAILED(hr)) { goto LABEL; } } while (0)
#endif
#ifndef IfFailGo
#define IfFailGo(EXPR) IfFailGoto(EXPR, ErrExit)
#endif
#ifndef IfFailRet
#define IfFailRet(EXPR) do { hr = (EXPR); if(FAILED(hr)) { return (hr); } } while (0)
#endif
#ifndef IfNullRet
#define IfNullRet(EXPR) do { if ((EXPR) == NULL){ return (E_OUTOFMEMORY); } } while (0)
#endif
#ifndef _ASSERTE
#define _ASSERTE(expr)
#endif
#if !BIGENDIAN
#define VAL16(x) x
#define VAL32(x) x
#endif
//*****************************************************************************
//
//***** Macro to assist with cleaning up local static variables
//
//*****************************************************************************
#define CHECK_LOCAL_STATIC_VAR(x) \
x \
//*****************************************************************************
//
//***** Utility helpers
//
//*****************************************************************************
#define MAX_CLASSNAME_LENGTH 1024
//*****************************************************************************
//
//***** Signature helpers
//
//*****************************************************************************
inline bool isCallConv(unsigned sigByte, CorCallingConvention conv)
{
return ((sigByte & IMAGE_CEE_CS_CALLCONV_MASK) == (unsigned) conv);
}
//*****************************************************************************
//
//***** File format helper classes
//
//*****************************************************************************
//*****************************************************************************
typedef struct tagCOR_ILMETHOD_SECT_SMALL : IMAGE_COR_ILMETHOD_SECT_SMALL {
//Data follows
const BYTE* Data() const
{
return(((const BYTE*) this) + sizeof(struct tagCOR_ILMETHOD_SECT_SMALL));
}
bool IsSmall() const
{
return (Kind & CorILMethod_Sect_FatFormat) == 0;
}
bool More() const
{
return (Kind & CorILMethod_Sect_MoreSects) != 0;
}
} COR_ILMETHOD_SECT_SMALL;
/************************************/
/* NOTE this structure must be DWORD aligned!! */
typedef struct tagCOR_ILMETHOD_SECT_FAT : IMAGE_COR_ILMETHOD_SECT_FAT {
//Data follows
const BYTE* Data() const
{
return(((const BYTE*) this) + sizeof(struct tagCOR_ILMETHOD_SECT_FAT));
}
//Endian-safe wrappers
unsigned GetKind() const {
/* return Kind; */
return *(BYTE*)this;
}
void SetKind(unsigned kind) {
/* Kind = kind; */
*(BYTE*)this = (BYTE)kind;
}
unsigned GetDataSize() const {
/* return DataSize; */
BYTE* p = (BYTE*)this;
return ((unsigned)*(p+1)) |
(((unsigned)*(p+2)) << 8) |
(((unsigned)*(p+3)) << 16);
}
void SetDataSize(unsigned datasize) {
/* DataSize = dataSize; */
BYTE* p = (BYTE*)this;
*(p+1) = (BYTE)(datasize);
*(p+2) = (BYTE)(datasize >> 8);
*(p+3) = (BYTE)(datasize >> 16);
}
} COR_ILMETHOD_SECT_FAT;
typedef struct tagCOR_ILMETHOD_SECT_EH_CLAUSE_FAT : public IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT {
//Endian-safe wrappers
CorExceptionFlag GetFlags() const {
return (CorExceptionFlag)VAL32((unsigned)Flags);
}
void SetFlags(CorExceptionFlag flags) {
Flags = (CorExceptionFlag)VAL32((unsigned)flags);
}
DWORD GetTryOffset() const {
return VAL32(TryOffset);
}
void SetTryOffset(DWORD Offset) {
TryOffset = VAL32(Offset);
}
DWORD GetTryLength() const {
return VAL32(TryLength);
}
void SetTryLength(DWORD Length) {
TryLength = VAL32(Length);
}
DWORD GetHandlerOffset() const {
return VAL32(HandlerOffset);
}
void SetHandlerOffset(DWORD Offset) {
HandlerOffset = VAL32(Offset);
}
DWORD GetHandlerLength() const {
return VAL32(HandlerLength);
}
void SetHandlerLength(DWORD Length) {
HandlerLength = VAL32(Length);
}
DWORD GetClassToken() const {
return VAL32(ClassToken);
}
void SetClassToken(DWORD tok) {
ClassToken = VAL32(tok);
}
DWORD GetFilterOffset() const {
return VAL32(FilterOffset);
}
void SetFilterOffset(DWORD offset) {
FilterOffset = VAL32(offset);
}
} COR_ILMETHOD_SECT_EH_CLAUSE_FAT;
//*****************************************************************************
struct COR_ILMETHOD_SECT_EH_FAT : public COR_ILMETHOD_SECT_FAT {
static unsigned Size(unsigned ehCount) {
return (sizeof(COR_ILMETHOD_SECT_EH_FAT) +
sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT) * (ehCount-1));
}
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT Clauses[1]; // actually variable size
};
typedef struct tagCOR_ILMETHOD_SECT_EH_CLAUSE_SMALL : public IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL {
//Endian-safe wrappers
CorExceptionFlag GetFlags() const {
return (CorExceptionFlag)VAL16((SHORT)Flags);
}
void SetFlags(CorExceptionFlag flags) {
Flags = (CorExceptionFlag)VAL16((SHORT)flags);
}
DWORD GetTryOffset() const {
return VAL16(TryOffset);
}
void SetTryOffset(DWORD Offset) {
_ASSERTE((Offset & ~0xffff) == 0);
TryOffset = VAL16(Offset);
}
DWORD GetTryLength() const {
return TryLength;
}
void SetTryLength(DWORD Length) {
_ASSERTE((Length & ~0xff) == 0);
TryLength = Length;
}
DWORD GetHandlerOffset() const {
return VAL16(HandlerOffset);
}
void SetHandlerOffset(DWORD Offset) {
_ASSERTE((Offset & ~0xffff) == 0);
HandlerOffset = VAL16(Offset);
}
DWORD GetHandlerLength() const {
return HandlerLength;
}
void SetHandlerLength(DWORD Length) {
_ASSERTE((Length & ~0xff) == 0);
HandlerLength = Length;
}
DWORD GetClassToken() const {
return VAL32(ClassToken);
}
void SetClassToken(DWORD tok) {
ClassToken = VAL32(tok);
}
DWORD GetFilterOffset() const {
return VAL32(FilterOffset);
}
void SetFilterOffset(DWORD offset) {
FilterOffset = VAL32(offset);
}
} COR_ILMETHOD_SECT_EH_CLAUSE_SMALL;
//*****************************************************************************
struct COR_ILMETHOD_SECT_EH_SMALL : public COR_ILMETHOD_SECT_SMALL {
static unsigned Size(unsigned ehCount) {
return (sizeof(COR_ILMETHOD_SECT_EH_SMALL) +
sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL) * (ehCount-1));
}
WORD Reserved; // alignment padding
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL Clauses[1]; // actually variable size
};
/************************************/
/* NOTE this structure must be DWORD aligned!! */
struct COR_ILMETHOD_SECT
{
bool More() const
{
return((AsSmall()->Kind & CorILMethod_Sect_MoreSects) != 0);
}
CorILMethodSect Kind() const
{
return((CorILMethodSect) (AsSmall()->Kind & CorILMethod_Sect_KindMask));
}
const COR_ILMETHOD_SECT* Next() const
{
if (!More()) return(0);
return ((COR_ILMETHOD_SECT*)Align(((BYTE *)this) + DataSize()));
}
const BYTE* Data() const
{
if (IsFat()) return(AsFat()->Data());
return(AsSmall()->Data());
}
unsigned DataSize() const
{
if (Kind() == CorILMethod_Sect_EHTable)
{
            // VB and MC++ shipped with a bug where they did not account for the size of COR_ILMETHOD_SECT_EH_XXX
// in DataSize. To avoid breaking these images, we will align the size of EH sections up. This works
// because IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_XXX is bigger than COR_ILMETHOD_SECT_EH_XXX
// (see VSWhidbey #99031 and related bugs for details).
if (IsFat())
return Fat.Size(Fat.GetDataSize() / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT));
else
return Small.Size(Small.DataSize / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL));
}
else
{
if (IsFat()) return(AsFat()->GetDataSize());
return(AsSmall()->DataSize);
}
}
friend struct COR_ILMETHOD;
friend struct tagCOR_ILMETHOD_FAT;
friend struct tagCOR_ILMETHOD_TINY;
bool IsFat() const
{
return((AsSmall()->Kind & CorILMethod_Sect_FatFormat) != 0);
}
static const void* Align(const void* p)
{
return((void*) ((((UINT_PTR) p) + 3) & ~3));
}
protected:
const COR_ILMETHOD_SECT_FAT* AsFat() const
{
return((COR_ILMETHOD_SECT_FAT*) this);
}
const COR_ILMETHOD_SECT_SMALL* AsSmall() const
{
return((COR_ILMETHOD_SECT_SMALL*) this);
}
public:
// The body is either a COR_ILMETHOD_SECT_SMALL or COR_ILMETHOD_SECT_FAT
    // (as indicated by the CorILMethod_Sect_FatFormat bit)
union {
COR_ILMETHOD_SECT_EH_SMALL Small;
COR_ILMETHOD_SECT_EH_FAT Fat;
};
};
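// Illustrative sketch (hypothetical helper, not the runtime's actual consumer):
// one way to walk the chain of extra sections with the accessors above and
// locate the EH table, if any.
inline const COR_ILMETHOD_SECT* FindEHSection(const COR_ILMETHOD_SECT* sect)
{
    while (sect != NULL)
    {
        if (sect->Kind() == CorILMethod_Sect_EHTable)
            return sect;         // found the EH table section
        sect = sect->Next();     // Next() returns 0 once More() is false
    }
    return NULL;
}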
/***********************************/
// exported functions (implementation in Format\Format.cpp):
extern "C" {
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* __stdcall SectEH_EHClause(void *pSectEH, unsigned idx, IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* buff);
// compute the size of the section (best format)
// codeSize is the size of the method
// deprecated
unsigned __stdcall SectEH_SizeWithCode(unsigned ehCount, unsigned codeSize);
    // will return worst-case size and then Emit will return actual size
unsigned __stdcall SectEH_SizeWorst(unsigned ehCount);
// will return exact size which will match the size returned by Emit
unsigned __stdcall SectEH_SizeExact(unsigned ehCount, IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses);
// emit the section (best format);
unsigned __stdcall SectEH_Emit(unsigned size, unsigned ehCount,
IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses,
BOOL moreSections, BYTE* outBuff,
ULONG* ehTypeOffsets = 0);
} // extern "C"
struct COR_ILMETHOD_SECT_EH : public COR_ILMETHOD_SECT
{
unsigned EHCount() const
{
return (unsigned)(IsFat() ? (Fat.GetDataSize() / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT)) :
(Small.DataSize / sizeof(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_SMALL)));
}
// return one clause in its fat form. Use 'buff' if needed
const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* EHClause(unsigned idx, IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* buff) const
{
return SectEH_EHClause((void *)this, idx, buff);
};
// compute the size of the section (best format)
// codeSize is the size of the method
// deprecated
unsigned static Size(unsigned ehCount, unsigned codeSize)
{
return SectEH_SizeWithCode(ehCount, codeSize);
};
    // will return worst-case size and then Emit will return actual size
unsigned static Size(unsigned ehCount)
{
return SectEH_SizeWorst(ehCount);
};
// will return exact size which will match the size returned by Emit
unsigned static Size(unsigned ehCount, const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses)
{
return SectEH_SizeExact(ehCount, (IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)clauses);
};
// emit the section (best format);
unsigned static Emit(unsigned size, unsigned ehCount,
const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* clauses,
bool moreSections, BYTE* outBuff,
ULONG* ehTypeOffsets = 0)
{
return SectEH_Emit(size, ehCount,
(IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT*)clauses,
moreSections, outBuff, ehTypeOffsets);
};
};
/***************************************************************************/
/* Used when the method is tiny (< 64 bytes), and there are no local vars */
typedef struct tagCOR_ILMETHOD_TINY : IMAGE_COR_ILMETHOD_TINY
{
bool IsTiny() const
{
return((Flags_CodeSize & (CorILMethod_FormatMask >> 1)) == CorILMethod_TinyFormat);
}
unsigned GetCodeSize() const
{
return(((unsigned) Flags_CodeSize) >> (CorILMethod_FormatShift-1));
}
unsigned GetMaxStack() const
{
return(8);
}
BYTE* GetCode() const
{
return(((BYTE*) this) + sizeof(struct tagCOR_ILMETHOD_TINY));
}
DWORD GetLocalVarSigTok() const
{
return(0);
}
COR_ILMETHOD_SECT* GetSect() const
{
return(0);
}
} COR_ILMETHOD_TINY;
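// Illustrative note: in the tiny format the whole header is the single
// Flags_CodeSize byte. The low two bits must equal CorILMethod_TinyFormat and
// the remaining six bits hold the code size, so a leading byte of 0x2A, for
// example, decodes as a tiny header for a 10-byte body (0x2A >> 2 == 10) with
// MaxStack fixed at 8 and no local variable signature.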
/************************************/
// This structure is the 'fat' layout, where no compression is attempted.
// Note that this structure can be added on at the end, thus making it extensible
typedef struct tagCOR_ILMETHOD_FAT : IMAGE_COR_ILMETHOD_FAT
{
//Endian-safe wrappers
unsigned GetSize() const {
/* return Size; */
BYTE* p = (BYTE*)this;
return *(p+1) >> 4;
}
void SetSize(unsigned size) {
/* Size = size; */
BYTE* p = (BYTE*)this;
*(p+1) = (BYTE)((*(p+1) & 0x0F) | (size << 4));
}
unsigned GetFlags() const {
/* return Flags; */
BYTE* p = (BYTE*)this;
return ((unsigned)*(p+0)) | (( ((unsigned)*(p+1)) & 0x0F) << 8);
}
void SetFlags(unsigned flags) {
/* flags = Flags; */
BYTE* p = (BYTE*)this;
*p = (BYTE)flags;
*(p+1) = (BYTE)((*(p+1) & 0xF0) | ((flags >> 8) & 0x0F));
}
bool IsFat() const {
/* return((IMAGE_COR_ILMETHOD_FAT::GetFlags() & CorILMethod_FormatMask) == CorILMethod_FatFormat); */
return (*(BYTE*)this & CorILMethod_FormatMask) == CorILMethod_FatFormat;
}
unsigned GetMaxStack() const {
/* return MaxStack; */
return VAL16(*(USHORT*)((BYTE*)this+2));
}
void SetMaxStack(unsigned maxStack) {
/* MaxStack = maxStack; */
*(USHORT*)((BYTE*)this+2) = VAL16((USHORT)maxStack);
}
unsigned GetCodeSize() const
{
return VAL32(CodeSize);
}
void SetCodeSize(DWORD Size)
{
CodeSize = VAL32(Size);
}
mdToken GetLocalVarSigTok() const
{
return VAL32(LocalVarSigTok);
}
void SetLocalVarSigTok(mdSignature tok)
{
LocalVarSigTok = VAL32(tok);
}
BYTE* GetCode() const {
return(((BYTE*) this) + 4*GetSize());
}
bool More() const {
// return (GetFlags() & CorILMethod_MoreSects) != 0;
return (*(BYTE*)this & CorILMethod_MoreSects) != 0;
}
const COR_ILMETHOD_SECT* GetSect() const {
if (!More()) return (0);
return(((COR_ILMETHOD_SECT*) COR_ILMETHOD_SECT::Align(GetCode() + GetCodeSize())));
}
} COR_ILMETHOD_FAT;
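// Illustrative note: the byte arithmetic above mirrors the fat-header layout,
// where the low 12 bits of the first WORD are flags and the high 4 bits are
// the header size in DWORDs. For a typical fat header whose first two bytes
// are 0x1B 0x30, GetFlags() yields 0x1B | ((0x30 & 0x0F) << 8) == 0x1B
// (FatFormat | MoreSects | InitLocals), GetSize() yields 0x30 >> 4 == 3
// DWORDs, and GetCode() therefore starts 12 bytes past the header.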
extern "C" {
/************************************/
// exported functions (impl. Format\Format.cpp)
unsigned __stdcall IlmethodSize(COR_ILMETHOD_FAT* header, BOOL MoreSections);
// emit the header (bestFormat) return amount emitted
unsigned __stdcall IlmethodEmit(unsigned size, COR_ILMETHOD_FAT* header,
BOOL moreSections, BYTE* outBuff);
}
struct COR_ILMETHOD
{
    // a COR_ILMETHOD header should not be decoded by hand. Instead use
// COR_ILMETHOD_DECODER to decode it.
friend class COR_ILMETHOD_DECODER;
// compute the size of the header (best format)
unsigned static Size(const COR_ILMETHOD_FAT* header, bool MoreSections)
{
return IlmethodSize((COR_ILMETHOD_FAT*)header,MoreSections);
};
// emit the header (bestFormat) return amount emitted
unsigned static Emit(unsigned size, const COR_ILMETHOD_FAT* header,
bool moreSections, BYTE* outBuff)
{
return IlmethodEmit(size, (COR_ILMETHOD_FAT*)header, moreSections, outBuff);
};
//private:
union
{
COR_ILMETHOD_TINY Tiny;
COR_ILMETHOD_FAT Fat;
};
    // Code follows the Header, then immediately after the code come
// any sections (COR_ILMETHOD_SECT).
};
extern "C" {
/***************************************************************************/
/* COR_ILMETHOD_DECODER is the only way functions internal to the EE should
   fetch data from a COR_ILMETHOD. This way any dependency on the file format
(and the multiple ways of encoding the header) is centralized to the
   COR_ILMETHOD_DECODER constructor. */
void __stdcall DecoderInit(void * pThis, COR_ILMETHOD* header);
int __stdcall DecoderGetOnDiskSize(void * pThis, COR_ILMETHOD* header);
} // extern "C"
class COR_ILMETHOD_DECODER : public COR_ILMETHOD_FAT
{
public:
// This returns an uninitialized decoder, suitable for placement new but nothing
// else. Use with caution.
COR_ILMETHOD_DECODER() {}
// Typically the ONLY way you should access COR_ILMETHOD is through
// this constructor so format changes are easier.
COR_ILMETHOD_DECODER(const COR_ILMETHOD* header)
{
DecoderInit(this,(COR_ILMETHOD*)header);
};
    // The above variant of the constructor cannot do a 'complete' job, because
    // it cannot look up the local variable signature metadata token.
    // This method should be used when you have access to the metadata API.
// If the construction fails, the 'Code' field is set to 0
enum DecoderStatus {SUCCESS, FORMAT_ERROR, VERIFICATION_ERROR};
// If we want the decoder to verify the that local signature is OK we
// will pass a non-NULL value for wbStatus
//
    // When using LazyInit we want to ask that the local signature be verified
// But if we fail verification we still need access to the 'Code' field
// Because we may be able to demand SkipVerification and thus it was OK
// to have had a verification error.
COR_ILMETHOD_DECODER(COR_ILMETHOD* header,
void *pInternalImport,
DecoderStatus* wbStatus);
unsigned EHCount() const
{
return (EH != 0) ? EH->EHCount() : 0;
}
unsigned GetHeaderSize() const
{
return GetCodeSize() + ((EH != 0) ? EH->DataSize() : 0);
}
// returns total size of method for use in copying
int GetOnDiskSize(const COR_ILMETHOD* header)
{
return DecoderGetOnDiskSize(this,(COR_ILMETHOD*)header);
}
// Flags these are available because we inherit COR_ILMETHOD_FAT
// MaxStack
// CodeSize
const BYTE * Code;
PCCOR_SIGNATURE LocalVarSig; // pointer to signature blob, or 0 if none
    DWORD           cbLocalVarSig;      // size of signature blob, or 0 if none
const COR_ILMETHOD_SECT_EH * EH; // eh table if any 0 if none
const COR_ILMETHOD_SECT * Sect; // additional sections 0 if none
}; // class COR_ILMETHOD_DECODER
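// Illustrative sketch (hypothetical helper): typical read-only use of the
// decoder, assuming pHeader points at a method body obtained from the
// metadata/RVA machinery elsewhere.
inline unsigned CountILBytes(const COR_ILMETHOD* pHeader, unsigned* pEHClauses)
{
    COR_ILMETHOD_DECODER decoder(pHeader);  // normalizes the tiny and fat formats
    if (pEHClauses != NULL)
        *pEHClauses = decoder.EHCount();    // 0 when there is no EH section
    // Code is 0 if construction failed; GetCodeSize() comes from the
    // inherited COR_ILMETHOD_FAT accessors.
    return (decoder.Code != NULL) ? decoder.GetCodeSize() : 0;
}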
#if defined(CORHLPR_TURNED_FPO_ON)
#pragma optimize("", on) // Go back to command line default optimizations
#undef CORHLPR_TURNED_FPO_ON
#undef FPO_ON
#endif
#endif // __CORHLPR_H__
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/pal/src/include/pal/numa.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
include/pal/numa.h
Abstract:
Header file for the NUMA functions.
--*/
#ifndef _PAL_NUMA_H_
#define _PAL_NUMA_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
BOOL
NUMASupportInitialize();
VOID
NUMASupportCleanup();
#ifdef __cplusplus
}
#endif // __cplusplus
#endif /* _PAL_NUMA_H_ */
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++
Module Name:
include/pal/numa.h
Abstract:
Header file for the NUMA functions.
--*/
#ifndef _PAL_NUMA_H_
#define _PAL_NUMA_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
BOOL
NUMASupportInitialize();
VOID
NUMASupportCleanup();
#ifdef __cplusplus
}
#endif // __cplusplus
#endif /* _PAL_NUMA_H_ */
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/Interop/PInvoke/Generics/GenericsNative.Vector256B.cpp | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdint.h>
#include <xplatform.h>
#include <platformdefines.h>
#if defined(TARGET_XARCH)
#include <immintrin.h>
#elif defined(TARGET_ARMARCH)
// Intentionally empty
#else
#error Unsupported target architecture
#endif
#if defined(TARGET_XARCH)
typedef __m256i Vector256B;
#else
typedef struct {
bool e00;
bool e01;
bool e02;
bool e03;
bool e04;
bool e05;
bool e06;
bool e07;
bool e08;
bool e09;
bool e10;
bool e11;
bool e12;
bool e13;
bool e14;
bool e15;
bool e16;
bool e17;
bool e18;
bool e19;
bool e20;
bool e21;
bool e22;
bool e23;
bool e24;
bool e25;
bool e26;
bool e27;
bool e28;
bool e29;
bool e30;
bool e31;
} Vector256B;
#endif
static Vector256B Vector256BValue = { };
extern "C" DLL_EXPORT Vector256B STDMETHODCALLTYPE ENABLE_AVX GetVector256B(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31)
{
union {
bool value[32];
Vector256B result;
};
value[0] = e00;
value[1] = e01;
value[2] = e02;
value[3] = e03;
value[4] = e04;
value[5] = e05;
value[6] = e06;
value[7] = e07;
value[8] = e08;
value[9] = e09;
value[10] = e10;
value[11] = e11;
value[12] = e12;
value[13] = e13;
value[14] = e14;
value[15] = e15;
value[16] = e16;
value[17] = e17;
value[18] = e18;
value[19] = e19;
value[20] = e20;
value[21] = e21;
value[22] = e22;
value[23] = e23;
value[24] = e24;
value[25] = e25;
value[26] = e26;
value[27] = e27;
value[28] = e28;
value[29] = e29;
value[30] = e30;
value[31] = e31;
return result;
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ENABLE_AVX GetVector256BOut(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31, Vector256B* pValue)
{
Vector256B value = GetVector256B(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31);
#if defined(TARGET_XARCH)
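    // Write the result through two unaligned 16-byte stores so that pValue
    // does not need any particular alignment.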
_mm_storeu_si128(((__m128i*)pValue) + 0, *(((__m128i*)&value) + 0));
_mm_storeu_si128(((__m128i*)pValue) + 1, *(((__m128i*)&value) + 1));
#else
*pValue = value;
#endif
}
extern "C" DLL_EXPORT const Vector256B* STDMETHODCALLTYPE ENABLE_AVX GetVector256BPtr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31)
{
GetVector256BOut(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, &Vector256BValue);
return &Vector256BValue;
}
extern "C" DLL_EXPORT Vector256B STDMETHODCALLTYPE ENABLE_AVX AddVector256B(Vector256B lhs, Vector256B rhs)
{
throw "P/Invoke for Vector256<bool> should be unsupported.";
}
extern "C" DLL_EXPORT Vector256B STDMETHODCALLTYPE ENABLE_AVX AddVector256Bs(const Vector256B* pValues, uint32_t count)
{
throw "P/Invoke for Vector256<bool> should be unsupported.";
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdio.h>
#include <stdint.h>
#include <xplatform.h>
#include <platformdefines.h>
#if defined(TARGET_XARCH)
#include <immintrin.h>
#elif defined(TARGET_ARMARCH)
// Intentionally empty
#else
#error Unsupported target architecture
#endif
#if defined(TARGET_XARCH)
typedef __m256i Vector256B;
#else
typedef struct {
bool e00;
bool e01;
bool e02;
bool e03;
bool e04;
bool e05;
bool e06;
bool e07;
bool e08;
bool e09;
bool e10;
bool e11;
bool e12;
bool e13;
bool e14;
bool e15;
bool e16;
bool e17;
bool e18;
bool e19;
bool e20;
bool e21;
bool e22;
bool e23;
bool e24;
bool e25;
bool e26;
bool e27;
bool e28;
bool e29;
bool e30;
bool e31;
} Vector256B;
#endif
static Vector256B Vector256BValue = { };
extern "C" DLL_EXPORT Vector256B STDMETHODCALLTYPE ENABLE_AVX GetVector256B(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31)
{
union {
bool value[32];
Vector256B result;
};
value[0] = e00;
value[1] = e01;
value[2] = e02;
value[3] = e03;
value[4] = e04;
value[5] = e05;
value[6] = e06;
value[7] = e07;
value[8] = e08;
value[9] = e09;
value[10] = e10;
value[11] = e11;
value[12] = e12;
value[13] = e13;
value[14] = e14;
value[15] = e15;
value[16] = e16;
value[17] = e17;
value[18] = e18;
value[19] = e19;
value[20] = e20;
value[21] = e21;
value[22] = e22;
value[23] = e23;
value[24] = e24;
value[25] = e25;
value[26] = e26;
value[27] = e27;
value[28] = e28;
value[29] = e29;
value[30] = e30;
value[31] = e31;
return result;
}
extern "C" DLL_EXPORT void STDMETHODCALLTYPE ENABLE_AVX GetVector256BOut(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31, Vector256B* pValue)
{
Vector256B value = GetVector256B(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31);
#if defined(TARGET_XARCH)
_mm_storeu_si128(((__m128i*)pValue) + 0, *(((__m128i*)&value) + 0));
_mm_storeu_si128(((__m128i*)pValue) + 1, *(((__m128i*)&value) + 1));
#else
*pValue = value;
#endif
}
extern "C" DLL_EXPORT const Vector256B* STDMETHODCALLTYPE ENABLE_AVX GetVector256BPtr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31)
{
GetVector256BOut(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, &Vector256BValue);
return &Vector256BValue;
}
extern "C" DLL_EXPORT Vector256B STDMETHODCALLTYPE ENABLE_AVX AddVector256B(Vector256B lhs, Vector256B rhs)
{
throw "P/Invoke for Vector256<bool> should be unsupported.";
}
extern "C" DLL_EXPORT Vector256B STDMETHODCALLTYPE ENABLE_AVX AddVector256Bs(const Vector256B* pValues, uint32_t count)
{
throw "P/Invoke for Vector256<bool> should be unsupported.";
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/vm/gccover.h | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __GCCOVER_H__
#define __GCCOVER_H__
#ifdef HAVE_GCCOVER
/****************************************************************************/
/* GCCoverageInfo holds the state of which instructions have been visited by
a GC and which ones have not */
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4200 ) // zero-sized array
#endif // _MSC_VER
class GCCoverageInfo {
public:
IJitManager::MethodRegionInfo methodRegion;
BYTE* curInstr; // The last instruction that was able to execute
// Following 6 variables are for prolog / epilog walking coverage
ICodeManager* codeMan; // CodeMan for this method
GCInfoToken gcInfoToken; // gcInfo for this method
Thread* callerThread; // Thread associated with context callerRegs
T_CONTEXT callerRegs; // register state when method was entered
    unsigned gcCount;               // GC count at the time we captured the regs
bool doingEpilogChecks; // are we doing epilog unwind checks? (do we care about callerRegs?)
enum { hasExecutedSize = 4 };
unsigned hasExecuted[hasExecutedSize];
unsigned totalCount;
union
{
BYTE savedCode[0]; // really variable sized
// Note that DAC doesn't marshal the entire byte array automatically.
// Any client of this field needs to get the TADDR of this field and
// marshal over the bytes properly.
};
// Sloppy bitsets (will wrap, and not threadsafe) but best effort is OK
// since we just need half decent coverage.
BOOL IsBitSetForOffset(unsigned offset) {
unsigned dword = hasExecuted[(offset >> 5) % hasExecutedSize];
return(dword & (1 << (offset & 0x1F)));
}
void SetBitForOffset(unsigned offset) {
unsigned* dword = &hasExecuted[(offset >> 5) % hasExecutedSize];
*dword |= (1 << (offset & 0x1F)) ;
}
void SprinkleBreakpoints(BYTE * saveAddr, PCODE codeStart, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped);
};
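// Illustrative sketch (hypothetical helper): how the "sloppy" bit set above is
// meant to be used. Offsets hash into a fixed pool of hasExecutedSize DWORDs,
// so distant offsets can alias the same bit; that is acceptable because the
// bits only steer best-effort coverage.
inline bool MarkOffsetExecuted(GCCoverageInfo* pCov, unsigned offset)
{
    if (pCov->IsBitSetForOffset(offset))
        return false;               // this bit (or an aliasing offset) was already seen
    pCov->SetBitForOffset(offset);  // not thread safe, by design
    return true;
}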
typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::savedCode
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
#define INTERRUPT_INSTR_PROTECT_FIRST_RET 0xFB // X86 STI instruction, protect the first return register
#define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register
#define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers
#elif defined(TARGET_ARM)
// 16-bit illegal instructions which will cause an exception and cause
// control to go to GcStress codepath
#define INTERRUPT_INSTR 0xde00
#define INTERRUPT_INSTR_CALL 0xde03 // 0xde01 generates SIGTRAP (breakpoint) instead of SIGILL on Unix
#define INTERRUPT_INSTR_PROTECT_RET 0xde02
// 32-bit illegal instructions. It is necessary to replace a 16-bit instruction
// with a 16-bit illegal instruction, and a 32-bit instruction with a 32-bit
// illegal instruction, to make GC stress with the "IT" instruction work, since
// it counts the number of instructions that follow it, so we can't change that
// number by replacing a 32-bit instruction with a 16-bit illegal instruction
// followed by 16 bits of junk that might end up being a legal instruction.
// Use the "Permanently UNDEFINED" section in the "ARM Architecture Reference Manual",
// section A6.3.4 "Branches and miscellaneous control" table.
// Note that we write these as a single 32-bit write, not two 16-bit writes, so the values
// need to be arranged as the ARM decoder wants them, with the high-order halfword first
// (in little-endian order).
#define INTERRUPT_INSTR_32 0xa001f7f0 // 0xf7f0a001
#define INTERRUPT_INSTR_CALL_32 0xa002f7f0 // 0xf7f0a002
#define INTERRUPT_INSTR_PROTECT_RET_32 0xa003f7f0 // 0xf7f0a003
#elif defined(TARGET_ARM64)
// The following encodings are undefined. They fall into section C4.5.8 - Data processing (2 source) of
// "Arm Architecture Reference Manual ARMv8"
//
#define INTERRUPT_INSTR 0xBADC0DE0
#define INTERRUPT_INSTR_CALL 0xBADC0DE1
#define INTERRUPT_INSTR_PROTECT_RET 0xBADC0DE2
#endif // _TARGET_*
// The body of this method is in this header file to allow
// mscordaccore.dll to link without getting an unsat symbol
//
inline bool IsGcCoverageInterruptInstructionVal(UINT32 instrVal)
{
#if defined(TARGET_ARM64)
switch (instrVal)
{
case INTERRUPT_INSTR:
case INTERRUPT_INSTR_CALL:
case INTERRUPT_INSTR_PROTECT_RET:
return true;
default:
return false;
}
#elif defined(TARGET_ARM)
UINT16 instrVal16 = static_cast<UINT16>(instrVal);
size_t instrLen = GetARMInstructionLength(instrVal16);
if (instrLen == 2)
{
switch (instrVal16)
{
case INTERRUPT_INSTR:
case INTERRUPT_INSTR_CALL:
case INTERRUPT_INSTR_PROTECT_RET:
return true;
default:
return false;
}
}
else
{
_ASSERTE(instrLen == 4);
switch (instrVal)
{
case INTERRUPT_INSTR_32:
case INTERRUPT_INSTR_CALL_32:
case INTERRUPT_INSTR_PROTECT_RET_32:
return true;
default:
return false;
}
}
#else // x64 and x86
switch (instrVal)
{
case INTERRUPT_INSTR:
case INTERRUPT_INSTR_CALL:
case INTERRUPT_INSTR_PROTECT_FIRST_RET:
case INTERRUPT_INSTR_PROTECT_SECOND_RET:
case INTERRUPT_INSTR_PROTECT_BOTH_RET:
return true;
default:
return false;
}
#endif // _TARGET_XXXX_
}
bool IsGcCoverageInterruptInstruction(PBYTE instrPtr);
bool IsGcCoverageInterrupt(LPVOID ip);
#endif // HAVE_GCCOVER
#endif // !__GCCOVER_H__
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __GCCOVER_H__
#define __GCCOVER_H__
#ifdef HAVE_GCCOVER
/****************************************************************************/
/* GCCoverageInfo holds the state of which instructions have been visited by
a GC and which ones have not */
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4200 ) // zero-sized array
#endif // _MSC_VER
class GCCoverageInfo {
public:
IJitManager::MethodRegionInfo methodRegion;
BYTE* curInstr; // The last instruction that was able to execute
// Following 6 variables are for prolog / epilog walking coverage
ICodeManager* codeMan; // CodeMan for this method
GCInfoToken gcInfoToken; // gcInfo for this method
Thread* callerThread; // Thread associated with context callerRegs
T_CONTEXT callerRegs; // register state when method was entered
    unsigned gcCount;               // GC count at the time we captured the regs
bool doingEpilogChecks; // are we doing epilog unwind checks? (do we care about callerRegs?)
enum { hasExecutedSize = 4 };
unsigned hasExecuted[hasExecutedSize];
unsigned totalCount;
union
{
BYTE savedCode[0]; // really variable sized
// Note that DAC doesn't marshal the entire byte array automatically.
// Any client of this field needs to get the TADDR of this field and
// marshal over the bytes properly.
};
// Sloppy bitsets (will wrap, and not threadsafe) but best effort is OK
// since we just need half decent coverage.
BOOL IsBitSetForOffset(unsigned offset) {
unsigned dword = hasExecuted[(offset >> 5) % hasExecutedSize];
return(dword & (1 << (offset & 0x1F)));
}
void SetBitForOffset(unsigned offset) {
unsigned* dword = &hasExecuted[(offset >> 5) % hasExecutedSize];
*dword |= (1 << (offset & 0x1F)) ;
}
void SprinkleBreakpoints(BYTE * saveAddr, PCODE codeStart, size_t codeSize, size_t regionOffsetAdj, BOOL fZapped);
};
typedef DPTR(GCCoverageInfo) PTR_GCCoverageInfo; // see code:GCCoverageInfo::savedCode
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
#if defined(TARGET_X86) || defined(TARGET_AMD64)
#define INTERRUPT_INSTR 0xF4 // X86 HLT instruction (any 1 byte illegal instruction will do)
#define INTERRUPT_INSTR_CALL 0xFA // X86 CLI instruction
#define INTERRUPT_INSTR_PROTECT_FIRST_RET 0xFB // X86 STI instruction, protect the first return register
#define INTERRUPT_INSTR_PROTECT_SECOND_RET 0xEC // X86 IN instruction, protect the second return register
#define INTERRUPT_INSTR_PROTECT_BOTH_RET 0xED // X86 IN instruction, protect both return registers
#elif defined(TARGET_ARM)
// 16-bit illegal instructions which will cause an exception and cause
// control to go to GcStress codepath
#define INTERRUPT_INSTR 0xde00
#define INTERRUPT_INSTR_CALL 0xde03 // 0xde01 generates SIGTRAP (breakpoint) instead of SIGILL on Unix
#define INTERRUPT_INSTR_PROTECT_RET 0xde02
// 32-bit illegal instructions. It is necessary to replace a 16-bit instruction
// with a 16-bit illegal instruction, and a 32-bit instruction with a 32-bit
// illegal instruction, to make GC stress with the "IT" instruction work, since
// it counts the number of instructions that follow it, so we can't change that
// number by replacing a 32-bit instruction with a 16-bit illegal instruction
// followed by 16 bits of junk that might end up being a legal instruction.
// Use the "Permanently UNDEFINED" section in the "ARM Architecture Reference Manual",
// section A6.3.4 "Branches and miscellaneous control" table.
// Note that we write these as a single 32-bit write, not two 16-bit writes, so the values
// need to be arranged as the ARM decoder wants them, with the high-order halfword first
// (in little-endian order).
#define INTERRUPT_INSTR_32 0xa001f7f0 // 0xf7f0a001
#define INTERRUPT_INSTR_CALL_32 0xa002f7f0 // 0xf7f0a002
#define INTERRUPT_INSTR_PROTECT_RET_32 0xa003f7f0 // 0xf7f0a003
#elif defined(TARGET_ARM64)
// The following encodings are undefined. They fall into section C4.5.8 - Data processing (2 source) of
// "Arm Architecture Reference Manual ARMv8"
//
#define INTERRUPT_INSTR 0xBADC0DE0
#define INTERRUPT_INSTR_CALL 0xBADC0DE1
#define INTERRUPT_INSTR_PROTECT_RET 0xBADC0DE2
#endif // _TARGET_*
// The body of this method is in this header file to allow
// mscordaccore.dll to link without getting an unsat symbol
//
inline bool IsGcCoverageInterruptInstructionVal(UINT32 instrVal)
{
#if defined(TARGET_ARM64)
switch (instrVal)
{
case INTERRUPT_INSTR:
case INTERRUPT_INSTR_CALL:
case INTERRUPT_INSTR_PROTECT_RET:
return true;
default:
return false;
}
#elif defined(TARGET_ARM)
UINT16 instrVal16 = static_cast<UINT16>(instrVal);
size_t instrLen = GetARMInstructionLength(instrVal16);
if (instrLen == 2)
{
switch (instrVal16)
{
case INTERRUPT_INSTR:
case INTERRUPT_INSTR_CALL:
case INTERRUPT_INSTR_PROTECT_RET:
return true;
default:
return false;
}
}
else
{
_ASSERTE(instrLen == 4);
switch (instrVal)
{
case INTERRUPT_INSTR_32:
case INTERRUPT_INSTR_CALL_32:
case INTERRUPT_INSTR_PROTECT_RET_32:
return true;
default:
return false;
}
}
#else // x64 and x86
switch (instrVal)
{
case INTERRUPT_INSTR:
case INTERRUPT_INSTR_CALL:
case INTERRUPT_INSTR_PROTECT_FIRST_RET:
case INTERRUPT_INSTR_PROTECT_SECOND_RET:
case INTERRUPT_INSTR_PROTECT_BOTH_RET:
return true;
default:
return false;
}
#endif // _TARGET_XXXX_
}
bool IsGcCoverageInterruptInstruction(PBYTE instrPtr);
bool IsGcCoverageInterrupt(LPVOID ip);
#endif // HAVE_GCCOVER
#endif // !__GCCOVER_H__
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/mono/mono/tests/verifier/invalid_empty_catch_block.il | .assembly 'invalid_empty_catch_block'
{
.hash algorithm 0x00008004
.ver 0:0:0:0
}
.method public static int32 Main() cil managed
{
.entrypoint
.maxstack 8
.locals init (int32 V0)
//try catch
BB_00:
newobj instance void class [mscorlib]System.Exception::.ctor()
throw
leave END
BB_01:
BB_02:
pop
leave END
BB_03:
END:
ldc.i4.0
ret
.try BB_00 to BB_01 catch [mscorlib]System.Exception handler BB_02 to BB_02
}
| .assembly 'invalid_empty_catch_block'
{
.hash algorithm 0x00008004
.ver 0:0:0:0
}
.method public static int32 Main() cil managed
{
.entrypoint
.maxstack 8
.locals init (int32 V0)
//try catch
BB_00:
newobj instance void class [mscorlib]System.Exception::.ctor()
throw
leave END
BB_01:
BB_02:
pop
leave END
BB_03:
END:
ldc.i4.0
ret
.try BB_00 to BB_01 catch [mscorlib]System.Exception handler BB_02 to BB_02
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b13586/b13586.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly extern System.Console
{
.publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
.ver 4:0:0:0
}
.assembly b13586
{
}
.class ILGEN_6 {
.method static int32 main() {
.entrypoint
.maxstack 20
.locals (float64)
ldc.r8 3.3
stloc 0
ldloca 0
ldind.r8
ldc.r8 1.1
ldloc 0
sub
cgt
ldloca 0
ldind.r8
ldloc 0
clt
sub
ldc.i4 99
add
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly extern System.Console
{
.publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
.ver 4:0:0:0
}
.assembly b13586
{
}
.class ILGEN_6 {
.method static int32 main() {
.entrypoint
.maxstack 20
.locals (float64)
ldc.r8 3.3
stloc 0
ldloca 0
ldind.r8
ldc.r8 1.1
ldloc 0
sub
cgt
ldloca 0
ldind.r8
ldloc 0
clt
sub
ldc.i4 99
add
ret
}
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixed that by introducing the folding that the creation of these location nodes achieves at the address level, where we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1256/Generated1256.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated1256.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated1256.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
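As a rough, hypothetical illustration of what such a zero-offset sequence corresponds to at the source level (the type and field names below are invented, not taken from the PR): when a field that lives at offset 0 of its struct is read through the local's address, there is no address-plus-offset node for the JIT to hang the field information on, so it is attached to the address of the local itself.

```csharp
public struct Header
{
    public int Magic;    // first field: stored at offset 0 of the struct
    public int Length;
}

public static class ZeroOffsetSketch
{
    public static int ReadMagic()
    {
        Header local = new Header { Magic = 0x1F8B, Length = 64 };
        ref Header view = ref local;

        // `view.Magic` reads at offset 0 from the address of `local`; with no
        // "address + offset" node available, the field information travels as
        // a zero-offset sequence attached to that address.
        return view.Magic;
    }
}
```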
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Generics/Instantiation/Structs/struct02.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="struct02.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="struct02.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/nativeaot/SmokeTests/DynamicGenerics/statics.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Reflection;
using System.Linq;
using System.Text;
using System.Runtime.CompilerServices;
using CoreFXTestLibrary;
using TypeOfRepo;
public class StaticsTests
{
#if USC
public struct MyCustomType
{
string _field;
public MyCustomType(String s) { _field = s; }
public override string ToString() { return _field; }
}
#else
public class MyCustomType
{
string _field;
public MyCustomType(String s) { _field = s; }
public override string ToString() { return _field; }
}
#endif
public class GenericTypeWithNonGcStaticField<T>
{
protected static int _myField;
public GenericTypeWithNonGcStaticField(int i)
{
_myField = i;
}
public override string ToString()
{
return _myField.ToString();
}
}
public class GenericTypeWithMultipleNonGcStaticFields<T>
{
static int _myInt1;
static bool _myBool1;
static int _myInt2;
public GenericTypeWithMultipleNonGcStaticFields(int int1, bool bool1, int int2)
{
_myInt1 = int1;
_myBool1 = bool1;
_myInt2 = int2;
}
public override string ToString()
{
return _myInt1.ToString() + " " + _myBool1.ToString() + " " + _myInt2.ToString();
}
}
public class DerivedGenericTypeWithNonGcStaticField<T> : GenericTypeWithNonGcStaticField<T>
{
protected static int _mySpecializedField;
public DerivedGenericTypeWithNonGcStaticField(int myField, int mySpecializedField) : base(myField)
{
_myField = myField;
_mySpecializedField = mySpecializedField;
}
public override string ToString()
{
return base.ToString() + " " + _mySpecializedField.ToString();
}
}
public class SuperDerivedGeneric<T> : DerivedGenericTypeWithNonGcStaticField<T>
{
static int _mySuperDerivedField;
public SuperDerivedGeneric(int myField, int mySpecializedField, int superDerivedField) : base(myField, mySpecializedField)
{
_mySuperDerivedField = superDerivedField;
}
public override string ToString()
{
return base.ToString() + " " + _mySuperDerivedField.ToString();
}
}
public class GenericTypeWithStaticTimeSpanField<T>
{
static TimeSpan s_timespan;
public GenericTypeWithStaticTimeSpanField(double s)
{
s_timespan = TimeSpan.FromSeconds(s);
}
static GenericTypeWithStaticTimeSpanField()
{
s_timespan = TimeSpan.FromSeconds(42.0);
}
public override string ToString()
{
return s_timespan.ToString();
}
}
public class GenericTypeWithGcStaticField<T>
{
static string _myString;
public GenericTypeWithGcStaticField(string myString)
{
_myString = myString;
}
public override string ToString()
{
return _myString;
}
public void SetMyString(string s)
{
_myString = s;
}
}
#if USC
public struct SillyString
#else
public class SillyString
#endif
{
public override string ToString()
{
return "SillyString";
}
}
public class GenericTypeWithStaticFieldOfTypeT<T>
{
static T _myField;
public GenericTypeWithStaticFieldOfTypeT(T val)
{
_myField = val;
}
public T Field
{
get
{
return _myField;
}
}
public override string ToString()
{
return _myField.ToString();
}
}
public class ClassWithStaticConstructor<T>
{
static string s_myStaticString;
static ClassWithStaticConstructor()
{
s_myStaticString = typeof(T).ToString();
}
public override string ToString()
{
return s_myStaticString;
}
}
public class AnotherClassWithStaticConstructor<T> : ClassWithStaticConstructor<T>
{
static int s_cctorRunCounter;
static AnotherClassWithStaticConstructor()
{
++s_cctorRunCounter;
}
public override string ToString()
{
return base.ToString() + " " + s_cctorRunCounter.ToString();
}
}
[TestMethod]
public static void TestStatics()
{
// Test that different instantiations of the same type get their own static data
{
Type stringInstType = TypeOf.ST_GenericTypeWithStaticFieldOfTypeT.MakeGenericType(typeof(MyCustomType));
Type sillyStringInstType = TypeOf.ST_GenericTypeWithStaticFieldOfTypeT.MakeGenericType(typeof(SillyString));
var sillyStringInst = Activator.CreateInstance(sillyStringInstType, new object[] { new SillyString() });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { new MyCustomType("Not a silly string") });
string result = sillyStringInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("SillyString Not a silly string", result);
}
// Test that different instantiations of the same type get their own static data
{
Type stringInstType = TypeOf.ST_GenericTypeWithNonGcStaticField.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_GenericTypeWithNonGcStaticField.MakeGenericType(TypeOf.CommonType2);
Type boolInstType = TypeOf.ST_GenericTypeWithNonGcStaticField.MakeGenericType(typeof(StaticsTests));
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 666 });
var boolInst = Activator.CreateInstance(boolInstType, new object[] { 999 });
string result = objectInst.ToString() + " " + stringInst.ToString() + " " + boolInst.ToString();
Assert.AreEqual("123 666 999", result);
}
// Validate that multiple static non-GC fields on the generic type work correctly over several instantiations
{
Type stringInstType = TypeOf.ST_GenericTypeWithMultipleNonGcStaticFields.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_GenericTypeWithMultipleNonGcStaticFields.MakeGenericType(TypeOf.CommonType2);
Type boolInstType = TypeOf.ST_GenericTypeWithMultipleNonGcStaticFields.MakeGenericType(typeof(StaticsTests));
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123, true, 321 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 666, false, 777 });
var boolInst = Activator.CreateInstance(boolInstType, new object[] { 999, true, 111 });
string result = objectInst.ToString() + " " + stringInst.ToString() + " " + boolInst.ToString();
Assert.AreEqual("123 True 321 666 False 777 999 True 111", result);
}
// Validate statics on several layers of a generic type hierarchy
{
Type stringInstType = TypeOf.ST_SuperDerivedGeneric.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_SuperDerivedGeneric.MakeGenericType(TypeOf.CommonType2);
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123, 321, 456 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 666, 999, 111 });
string result = objectInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("123 321 456 666 999 111", result);
}
{
Type objectInstType = TypeOf.ST_GenericTypeWithStaticTimeSpanField.MakeGenericType(TypeOf.CommonType2);
Type stringInstType = TypeOf.ST_GenericTypeWithStaticTimeSpanField.MakeGenericType(TypeOf.CommonType1);
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123.0 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 456.0 });
string result = objectInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("00:02:03 00:07:36", result);
}
// GC statics tests
{
Type stringInstType = TypeOf.ST_GenericTypeWithGcStaticField.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_GenericTypeWithGcStaticField.MakeGenericType(TypeOf.CommonType2);
var objectInst = Activator.CreateInstance(objectInstType, new object[] { "Hello" });
var stringInst0 = Activator.CreateInstance(stringInstType, new object[] { "And" });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { "Bye" });
string result = objectInst.ToString() + " " + stringInst0.ToString() + " " + stringInst.ToString();
Assert.AreEqual("Hello Bye Bye", result);
}
// Statics keep things alive
{
Type stringInstType = TypeOf.ST_GenericTypeWithGcStaticField.MakeGenericType(TypeOf.CommonType1);
var stringInst = Activator.CreateInstance(stringInstType, new object[] { "Bye" });
var setMyStringMethodInfo = stringInstType.GetTypeInfo().GetDeclaredMethod("SetMyString");
Console.WriteLine("Setting GC static");
{
string newString = "New Value Of The String!";
string my = newString.Replace("!", "");
setMyStringMethodInfo.Invoke(stringInst, new object[] {my});
}
Console.WriteLine("Calling GC.Collect");
GC.Collect();
Console.WriteLine("Verifying GC static wasn't collected erroneously");
string result = stringInst.ToString();
Assert.AreEqual("New Value Of The String", result);
}
{
Type stringInstType = TypeOf.ST_ClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_ClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType2);
var objectInst = Activator.CreateInstance(objectInstType);
var stringInst = Activator.CreateInstance(stringInstType);
string result = objectInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("CommonType2 CommonType1", result);
}
{
Type stringInstType = TypeOf.ST_AnotherClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_AnotherClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType2);
Type sbInstType = TypeOf.ST_AnotherClassWithStaticConstructor.MakeGenericType(typeof(StringBuilder));
var objectInst = Activator.CreateInstance(objectInstType);
var stringInst = Activator.CreateInstance(stringInstType);
var sbInst = Activator.CreateInstance(sbInstType);
// Make sure the class constructor is only run once - the two results should be the same (the static int
// should only get incremented once per instantiation).
string result1 = objectInst.ToString() + " " + stringInst.ToString() + " " + sbInst.ToString();
string result2 = objectInst.ToString() + " " + stringInst.ToString() + " " + sbInst.ToString();
Assert.AreEqual(result1, result2);
Assert.AreEqual("CommonType2 1 CommonType1 1 System.Text.StringBuilder 1", result1);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Reflection;
using System.Linq;
using System.Text;
using System.Runtime.CompilerServices;
using CoreFXTestLibrary;
using TypeOfRepo;
public class StaticsTests
{
#if USC
public struct MyCustomType
{
string _field;
public MyCustomType(String s) { _field = s; }
public override string ToString() { return _field; }
}
#else
public class MyCustomType
{
string _field;
public MyCustomType(String s) { _field = s; }
public override string ToString() { return _field; }
}
#endif
public class GenericTypeWithNonGcStaticField<T>
{
protected static int _myField;
public GenericTypeWithNonGcStaticField(int i)
{
_myField = i;
}
public override string ToString()
{
return _myField.ToString();
}
}
public class GenericTypeWithMultipleNonGcStaticFields<T>
{
static int _myInt1;
static bool _myBool1;
static int _myInt2;
public GenericTypeWithMultipleNonGcStaticFields(int int1, bool bool1, int int2)
{
_myInt1 = int1;
_myBool1 = bool1;
_myInt2 = int2;
}
public override string ToString()
{
return _myInt1.ToString() + " " + _myBool1.ToString() + " " + _myInt2.ToString();
}
}
public class DerivedGenericTypeWithNonGcStaticField<T> : GenericTypeWithNonGcStaticField<T>
{
protected static int _mySpecializedField;
public DerivedGenericTypeWithNonGcStaticField(int myField, int mySpecializedField) : base(myField)
{
_myField = myField;
_mySpecializedField = mySpecializedField;
}
public override string ToString()
{
return base.ToString() + " " + _mySpecializedField.ToString();
}
}
public class SuperDerivedGeneric<T> : DerivedGenericTypeWithNonGcStaticField<T>
{
static int _mySuperDerivedField;
public SuperDerivedGeneric(int myField, int mySpecializedField, int superDerivedField) : base(myField, mySpecializedField)
{
_mySuperDerivedField = superDerivedField;
}
public override string ToString()
{
return base.ToString() + " " + _mySuperDerivedField.ToString();
}
}
public class GenericTypeWithStaticTimeSpanField<T>
{
static TimeSpan s_timespan;
public GenericTypeWithStaticTimeSpanField(double s)
{
s_timespan = TimeSpan.FromSeconds(s);
}
static GenericTypeWithStaticTimeSpanField()
{
s_timespan = TimeSpan.FromSeconds(42.0);
}
public override string ToString()
{
return s_timespan.ToString();
}
}
public class GenericTypeWithGcStaticField<T>
{
static string _myString;
public GenericTypeWithGcStaticField(string myString)
{
_myString = myString;
}
public override string ToString()
{
return _myString;
}
public void SetMyString(string s)
{
_myString = s;
}
}
#if USC
public struct SillyString
#else
public class SillyString
#endif
{
public override string ToString()
{
return "SillyString";
}
}
public class GenericTypeWithStaticFieldOfTypeT<T>
{
static T _myField;
public GenericTypeWithStaticFieldOfTypeT(T val)
{
_myField = val;
}
public T Field
{
get
{
return _myField;
}
}
public override string ToString()
{
return _myField.ToString();
}
}
public class ClassWithStaticConstructor<T>
{
static string s_myStaticString;
static ClassWithStaticConstructor()
{
s_myStaticString = typeof(T).ToString();
}
public override string ToString()
{
return s_myStaticString;
}
}
public class AnotherClassWithStaticConstructor<T> : ClassWithStaticConstructor<T>
{
static int s_cctorRunCounter;
static AnotherClassWithStaticConstructor()
{
++s_cctorRunCounter;
}
public override string ToString()
{
return base.ToString() + " " + s_cctorRunCounter.ToString();
}
}
[TestMethod]
public static void TestStatics()
{
// Test that different instantiations of the same type get their own static data
{
Type stringInstType = TypeOf.ST_GenericTypeWithStaticFieldOfTypeT.MakeGenericType(typeof(MyCustomType));
Type sillyStringInstType = TypeOf.ST_GenericTypeWithStaticFieldOfTypeT.MakeGenericType(typeof(SillyString));
var sillyStringInst = Activator.CreateInstance(sillyStringInstType, new object[] { new SillyString() });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { new MyCustomType("Not a silly string") });
string result = sillyStringInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("SillyString Not a silly string", result);
}
// Test that different instantiations of the same type get their own static data
{
Type stringInstType = TypeOf.ST_GenericTypeWithNonGcStaticField.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_GenericTypeWithNonGcStaticField.MakeGenericType(TypeOf.CommonType2);
Type boolInstType = TypeOf.ST_GenericTypeWithNonGcStaticField.MakeGenericType(typeof(StaticsTests));
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 666 });
var boolInst = Activator.CreateInstance(boolInstType, new object[] { 999 });
string result = objectInst.ToString() + " " + stringInst.ToString() + " " + boolInst.ToString();
Assert.AreEqual("123 666 999", result);
}
// Validate that multiple static non-GC fields on the generic type work correctly over several instantiations
{
Type stringInstType = TypeOf.ST_GenericTypeWithMultipleNonGcStaticFields.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_GenericTypeWithMultipleNonGcStaticFields.MakeGenericType(TypeOf.CommonType2);
Type boolInstType = TypeOf.ST_GenericTypeWithMultipleNonGcStaticFields.MakeGenericType(typeof(StaticsTests));
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123, true, 321 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 666, false, 777 });
var boolInst = Activator.CreateInstance(boolInstType, new object[] { 999, true, 111 });
string result = objectInst.ToString() + " " + stringInst.ToString() + " " + boolInst.ToString();
Assert.AreEqual("123 True 321 666 False 777 999 True 111", result);
}
// Validate statics on several layers of a generic type hierarchy
{
Type stringInstType = TypeOf.ST_SuperDerivedGeneric.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_SuperDerivedGeneric.MakeGenericType(TypeOf.CommonType2);
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123, 321, 456 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 666, 999, 111 });
string result = objectInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("123 321 456 666 999 111", result);
}
{
Type objectInstType = TypeOf.ST_GenericTypeWithStaticTimeSpanField.MakeGenericType(TypeOf.CommonType2);
Type stringInstType = TypeOf.ST_GenericTypeWithStaticTimeSpanField.MakeGenericType(TypeOf.CommonType1);
var objectInst = Activator.CreateInstance(objectInstType, new object[] { 123.0 });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { 456.0 });
string result = objectInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("00:02:03 00:07:36", result);
}
// GC statics tests
{
Type stringInstType = TypeOf.ST_GenericTypeWithGcStaticField.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_GenericTypeWithGcStaticField.MakeGenericType(TypeOf.CommonType2);
var objectInst = Activator.CreateInstance(objectInstType, new object[] { "Hello" });
var stringInst0 = Activator.CreateInstance(stringInstType, new object[] { "And" });
var stringInst = Activator.CreateInstance(stringInstType, new object[] { "Bye" });
string result = objectInst.ToString() + " " + stringInst0.ToString() + " " + stringInst.ToString();
Assert.AreEqual("Hello Bye Bye", result);
}
// Statics keep things alive
{
Type stringInstType = TypeOf.ST_GenericTypeWithGcStaticField.MakeGenericType(TypeOf.CommonType1);
var stringInst = Activator.CreateInstance(stringInstType, new object[] { "Bye" });
var setMyStringMethodInfo = stringInstType.GetTypeInfo().GetDeclaredMethod("SetMyString");
Console.WriteLine("Setting GC static");
{
string newString = "New Value Of The String!";
string my = newString.Replace("!", "");
setMyStringMethodInfo.Invoke(stringInst, new object[] {my});
}
Console.WriteLine("Calling GC.Collect");
GC.Collect();
Console.WriteLine("Verifying GC static wasn't collected erroneously");
string result = stringInst.ToString();
Assert.AreEqual("New Value Of The String", result);
}
{
Type stringInstType = TypeOf.ST_ClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_ClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType2);
var objectInst = Activator.CreateInstance(objectInstType);
var stringInst = Activator.CreateInstance(stringInstType);
string result = objectInst.ToString() + " " + stringInst.ToString();
Assert.AreEqual("CommonType2 CommonType1", result);
}
{
Type stringInstType = TypeOf.ST_AnotherClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType1);
Type objectInstType = TypeOf.ST_AnotherClassWithStaticConstructor.MakeGenericType(TypeOf.CommonType2);
Type sbInstType = TypeOf.ST_AnotherClassWithStaticConstructor.MakeGenericType(typeof(StringBuilder));
var objectInst = Activator.CreateInstance(objectInstType);
var stringInst = Activator.CreateInstance(stringInstType);
var sbInst = Activator.CreateInstance(sbInstType);
// Make sure the class constructor is only run once - the two results should be the same (the static int
// should only get incremented once per instantiation).
string result1 = objectInst.ToString() + " " + stringInst.ToString() + " " + sbInst.ToString();
string result2 = objectInst.ToString() + " " + stringInst.ToString() + " " + sbInst.ToString();
Assert.AreEqual(result1, result2);
Assert.AreEqual("CommonType2 1 CommonType1 1 System.Text.StringBuilder 1", result1);
}
}
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
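For context, here is a hedged, invented sketch of the "reg-sized" situation that sentence refers to: a struct small enough to be returned in a single register, read through the address of a local directly at a return. None of this code comes from the PR.

```csharp
public struct RegSizedPair
{
    public int Lo;
    public int Hi;   // 8 bytes total, so the struct fits in one register on 64-bit targets
}

public static class RegSizedReturnSketch
{
    public static RegSizedPair ReturnThroughAddress(RegSizedPair input)
    {
        RegSizedPair local = input;
        ref RegSizedPair view = ref local;

        // Returning the value read through `view` puts an indirection over the
        // address of `local` directly under the return; late phases retype such
        // reg-sized struct returns to a primitive register type.
        return view;
    }
}
```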
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b16328/b16328.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
namespace DefaultNamespace
{
using System;
class DD
{
public static int zero = 0;
public static int Main()
{
try
{
int x = 100 / DD.zero;
}
catch (DivideByZeroException)
{
return 100;
}
return 1;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
namespace DefaultNamespace
{
using System;
class DD
{
public static int zero = 0;
public static int Main()
{
try
{
int x = 100 / DD.zero;
}
catch (DivideByZeroException)
{
return 100;
}
return 1;
}
}
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Methodical/casts/coverage/isinst_call_do.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="isinst_call.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="isinst_call.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Regression/JitBlue/GitHub_26417/GitHub_26417.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.CompilerServices;
class GitHub_26417
{
static int _a;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
static void MyWriteLine(int v)
{
Console.WriteLine(v);
if (v == 0)
{
throw new Exception();
}
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
static void Test()
{
_a = 1;
while (_a == 1)
{
MyWriteLine(_a);
_a = 0;
}
}
static int Main()
{
int result = 100;
try {
Test();
}
catch (Exception)
{
Console.WriteLine("FAILED");
result = -1;
}
return result;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.CompilerServices;
class GitHub_26417
{
static int _a;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
static void MyWriteLine(int v)
{
Console.WriteLine(v);
if (v == 0)
{
throw new Exception();
}
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
static void Test()
{
_a = 1;
while (_a == 1)
{
MyWriteLine(_a);
_a = 0;
}
}
static int Main()
{
int result = 100;
try {
Test();
}
catch (Exception)
{
Console.WriteLine("FAILED");
result = -1;
}
return result;
}
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Methodical/FPtrunc/convr4a_cs_r.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="convr4a.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="convr4a.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
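The file quoted below in this row parses the various `DynamicDependencyAttribute` overloads (documentation-signature strings as well as `DynamicallyAccessedMemberTypes` masks). As a reference point, here is a small, hypothetical usage sketch of that attribute; the target type and member names are invented for the example.

```csharp
using System.Diagnostics.CodeAnalysis;

public static class KeptByReflection
{
    public static void Helper() { }
    public static int Count;
}

public static class Consumer
{
    // String overload: names a member on the type given as the second argument.
    [DynamicDependency("Helper", typeof(KeptByReflection))]
    // Enum overload: keeps a whole category of members on the target type.
    [DynamicDependency(DynamicallyAccessedMemberTypes.PublicFields, typeof(KeptByReflection))]
    public static void UsesReflectionIndirectly()
    {
        // The body only reaches members through reflection, so the attributes
        // above are what tell trimming/AOT tooling to keep those members.
        typeof(KeptByReflection).GetMethod("Helper")?.Invoke(null, null);
    }
}
```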
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/DynamicDependencyAttributeAlgorithm.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Immutable;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Reflection.Metadata;
using Internal.TypeSystem;
using Internal.TypeSystem.Ecma;
using ILCompiler.Logging;
using static ILCompiler.Dataflow.DynamicallyAccessedMembersBinder;
using DependencyList = ILCompiler.DependencyAnalysisFramework.DependencyNodeCore<ILCompiler.DependencyAnalysis.NodeFactory>.DependencyList;
using MethodAttributes = System.Reflection.MethodAttributes;
using DynamicallyAccessedMemberTypes = ILCompiler.Dataflow.DynamicallyAccessedMemberTypes;
namespace ILCompiler.DependencyAnalysis
{
/// <summary>
/// Computes the list of dependencies from DynamicDependencyAttribute.
/// https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.codeanalysis.dynamicdependencyattribute
/// </summary>
internal class DynamicDependencyAttributeAlgorithm
{
public static void AddDependenciesDueToDynamicDependencyAttribute(ref DependencyList dependencies, NodeFactory factory, EcmaMethod method)
{
foreach (var attribute in method.GetDecodedCustomAttributes("System.Diagnostics.CodeAnalysis", "DynamicDependencyAttribute"))
{
IEnumerable<TypeSystemEntity> members;
static MetadataType Linkerify(TypeDesc type)
{
// IL Linker compatibility: illink will call Resolve() that will strip parameter types and genericness
// and operate on the definition.
while (type.IsParameterizedType)
type = ((ParameterizedType)type).ParameterType;
return (MetadataType)type.GetTypeDefinition();
}
// First figure out the list of members that this maps to.
// These are the ways to specify the members:
//
// * A string that contains a documentation signature
// * DynamicallyAccessedMembers enum
var fixedArgs = attribute.FixedArguments;
TypeDesc targetType;
if (fixedArgs.Length > 0 && fixedArgs[0].Value is string sigFromAttribute)
{
if (fixedArgs.Length == 1)
{
// DynamicDependencyAttribute(String)
targetType = method.OwningType;
}
else if (fixedArgs.Length == 2 && fixedArgs[1].Value is TypeDesc typeFromAttribute)
{
// DynamicDependencyAttribute(String, Type)
targetType = typeFromAttribute;
}
else if (fixedArgs.Length == 3 && fixedArgs[1].Value is string typeStringFromAttribute
&& fixedArgs[2].Value is string assemblyStringFromAttribute)
{
// DynamicDependencyAttribute(String, String, String)
ModuleDesc asm = factory.TypeSystemContext.ResolveAssembly(new System.Reflection.AssemblyName(assemblyStringFromAttribute), throwIfNotFound: false);
if (asm == null)
{
// _context.LogWarning($"Unresolved assembly '{dynamicDependency.AssemblyName}' in 'DynamicDependencyAttribute'", 2035, context);
continue;
}
targetType = DocumentationSignatureParser.GetTypeByDocumentationSignature((IAssemblyDesc)asm, typeStringFromAttribute);
if (targetType == null)
{
// _context.LogWarning ($"Unresolved type '{typeName}' in DynamicDependencyAttribute", 2036, context);
continue;
}
}
else
{
Debug.Fail("Did we introduce a new overload?");
continue;
}
members = DocumentationSignatureParser.GetMembersByDocumentationSignature(Linkerify(targetType), sigFromAttribute, acceptName: true);
}
else if (fixedArgs.Length > 0 && fixedArgs[0].Value is int memberTypesFromAttribute)
{
if (fixedArgs.Length == 2 && fixedArgs[1].Value is TypeDesc typeFromAttribute)
{
// DynamicDependencyAttribute(DynamicallyAccessedMemberTypes, Type)
targetType = typeFromAttribute;
}
else if (fixedArgs.Length == 3 && fixedArgs[1].Value is string typeStringFromAttribute
&& fixedArgs[2].Value is string assemblyStringFromAttribute)
{
// DynamicDependencyAttribute(DynamicallyAccessedMemberTypes, String, String)
ModuleDesc asm = factory.TypeSystemContext.ResolveAssembly(new System.Reflection.AssemblyName(assemblyStringFromAttribute), throwIfNotFound: false);
if (asm == null)
{
// _context.LogWarning($"Unresolved assembly '{dynamicDependency.AssemblyName}' in 'DynamicDependencyAttribute'", 2035, context);
continue;
}
targetType = DocumentationSignatureParser.GetTypeByDocumentationSignature((IAssemblyDesc)asm, typeStringFromAttribute);
if (targetType == null)
{
// _context.LogWarning ($"Unresolved type '{typeName}' in DynamicDependencyAttribute", 2036, context);
continue;
}
}
else
{
Debug.Fail("Did we introduce a new overload?");
continue;
}
members = Linkerify(targetType).GetDynamicallyAccessedMembers((DynamicallyAccessedMemberTypes)memberTypesFromAttribute);
}
else
{
Debug.Fail("Did we introduce a new overload?");
continue;
}
const string reason = "DynamicDependencyAttribute";
// Now root the discovered members
foreach (var member in members)
{
switch (member)
{
case MethodDesc m:
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, m, reason);
break;
case FieldDesc field:
RootingHelpers.TryGetDependenciesForReflectedField(ref dependencies, factory, field, reason);
break;
case MetadataType nestedType:
RootingHelpers.TryGetDependenciesForReflectedType(ref dependencies, factory, nestedType, reason);
break;
case PropertyPseudoDesc property:
if (property.GetMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, property.GetMethod, reason);
if (property.SetMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, property.SetMethod, reason);
break;
case EventPseudoDesc @event:
if (@event.AddMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, @event.AddMethod, reason);
if (@event.RemoveMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, @event.RemoveMethod, reason);
break;
default:
Debug.Fail(member.GetType().ToString());
break;
}
}
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Immutable;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Reflection.Metadata;
using Internal.TypeSystem;
using Internal.TypeSystem.Ecma;
using ILCompiler.Logging;
using static ILCompiler.Dataflow.DynamicallyAccessedMembersBinder;
using DependencyList = ILCompiler.DependencyAnalysisFramework.DependencyNodeCore<ILCompiler.DependencyAnalysis.NodeFactory>.DependencyList;
using MethodAttributes = System.Reflection.MethodAttributes;
using DynamicallyAccessedMemberTypes = ILCompiler.Dataflow.DynamicallyAccessedMemberTypes;
namespace ILCompiler.DependencyAnalysis
{
/// <summary>
/// Computes the list of dependencies from DynamicDependencyAttribute.
/// https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.codeanalysis.dynamicdependencyattribute
/// </summary>
internal class DynamicDependencyAttributeAlgorithm
{
public static void AddDependenciesDueToDynamicDependencyAttribute(ref DependencyList dependencies, NodeFactory factory, EcmaMethod method)
{
foreach (var attribute in method.GetDecodedCustomAttributes("System.Diagnostics.CodeAnalysis", "DynamicDependencyAttribute"))
{
IEnumerable<TypeSystemEntity> members;
static MetadataType Linkerify(TypeDesc type)
{
// IL Linker compatibility: illink will call Resolve() that will strip parameter types and genericness
// and operate on the definition.
while (type.IsParameterizedType)
type = ((ParameterizedType)type).ParameterType;
return (MetadataType)type.GetTypeDefinition();
}
// First figure out the list of members that this maps to.
// These are the ways to specify the members:
//
// * A string that contains a documentation signature
// * DynamicallyAccessedMembers enum
var fixedArgs = attribute.FixedArguments;
TypeDesc targetType;
if (fixedArgs.Length > 0 && fixedArgs[0].Value is string sigFromAttribute)
{
if (fixedArgs.Length == 1)
{
// DynamicDependencyAttribute(String)
targetType = method.OwningType;
}
else if (fixedArgs.Length == 2 && fixedArgs[1].Value is TypeDesc typeFromAttribute)
{
// DynamicDependencyAttribute(String, Type)
targetType = typeFromAttribute;
}
else if (fixedArgs.Length == 3 && fixedArgs[1].Value is string typeStringFromAttribute
&& fixedArgs[2].Value is string assemblyStringFromAttribute)
{
// DynamicDependencyAttribute(String, String, String)
ModuleDesc asm = factory.TypeSystemContext.ResolveAssembly(new System.Reflection.AssemblyName(assemblyStringFromAttribute), throwIfNotFound: false);
if (asm == null)
{
// _context.LogWarning($"Unresolved assembly '{dynamicDependency.AssemblyName}' in 'DynamicDependencyAttribute'", 2035, context);
continue;
}
targetType = DocumentationSignatureParser.GetTypeByDocumentationSignature((IAssemblyDesc)asm, typeStringFromAttribute);
if (targetType == null)
{
// _context.LogWarning ($"Unresolved type '{typeName}' in DynamicDependencyAttribute", 2036, context);
continue;
}
}
else
{
Debug.Fail("Did we introduce a new overload?");
continue;
}
members = DocumentationSignatureParser.GetMembersByDocumentationSignature(Linkerify(targetType), sigFromAttribute, acceptName: true);
}
else if (fixedArgs.Length > 0 && fixedArgs[0].Value is int memberTypesFromAttribute)
{
if (fixedArgs.Length == 2 && fixedArgs[1].Value is TypeDesc typeFromAttribute)
{
// DynamicDependencyAttribute(DynamicallyAccessedMemberTypes, Type)
targetType = typeFromAttribute;
}
else if (fixedArgs.Length == 3 && fixedArgs[1].Value is string typeStringFromAttribute
&& fixedArgs[2].Value is string assemblyStringFromAttribute)
{
// DynamicDependencyAttribute(DynamicallyAccessedMemberTypes, String, String)
ModuleDesc asm = factory.TypeSystemContext.ResolveAssembly(new System.Reflection.AssemblyName(assemblyStringFromAttribute), throwIfNotFound: false);
if (asm == null)
{
// _context.LogWarning($"Unresolved assembly '{dynamicDependency.AssemblyName}' in 'DynamicDependencyAttribute'", 2035, context);
continue;
}
targetType = DocumentationSignatureParser.GetTypeByDocumentationSignature((IAssemblyDesc)asm, typeStringFromAttribute);
if (targetType == null)
{
// _context.LogWarning ($"Unresolved type '{typeName}' in DynamicDependencyAttribute", 2036, context);
continue;
}
}
else
{
Debug.Fail("Did we introduce a new overload?");
continue;
}
members = Linkerify(targetType).GetDynamicallyAccessedMembers((DynamicallyAccessedMemberTypes)memberTypesFromAttribute);
}
else
{
Debug.Fail("Did we introduce a new overload?");
continue;
}
const string reason = "DynamicDependencyAttribute";
// Now root the discovered members
foreach (var member in members)
{
switch (member)
{
case MethodDesc m:
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, m, reason);
break;
case FieldDesc field:
RootingHelpers.TryGetDependenciesForReflectedField(ref dependencies, factory, field, reason);
break;
case MetadataType nestedType:
RootingHelpers.TryGetDependenciesForReflectedType(ref dependencies, factory, nestedType, reason);
break;
case PropertyPseudoDesc property:
if (property.GetMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, property.GetMethod, reason);
if (property.SetMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, property.SetMethod, reason);
break;
case EventPseudoDesc @event:
if (@event.AddMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, @event.AddMethod, reason);
if (@event.RemoveMethod != null)
RootingHelpers.TryGetDependenciesForReflectedMethod(ref dependencies, factory, @event.RemoveMethod, reason);
break;
default:
Debug.Fail(member.GetType().ToString());
break;
}
}
}
}
}
}
| -1 |
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as they would mean handling, everywhere in the code, the case where the layout is missing, which is precisely the case we do not want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; there we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed so that it no longer overlooks zero-offset sequences attached to `ADDR`s, which avoids regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; at that level we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/libraries/System.ServiceModel.Syndication/tests/System/ServiceModel/Syndication/SyndicationElementExtensionTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.IO;
using System.Runtime.Serialization;
using System.Xml;
using System.Xml.Linq;
using System.Xml.Schema;
using System.Xml.Serialization;
using Xunit;
namespace System.ServiceModel.Syndication.Tests
{
public class ServiceElementExtensionTests
{
[Fact]
public void Ctor_Reader()
{
var extension = new SyndicationElementExtension(new XElement("ExtensionObject", new XElement("Value", 10)).CreateReader());
Assert.Equal("ExtensionObject", extension.OuterName);
Assert.Empty(extension.OuterNamespace);
Assert.Equal(0, extension.GetObject<ExtensionObject>().Value);
Assert.Equal(0, extension.GetObject<ExtensionObject>(new DataContractSerializer(typeof(ExtensionObject))).Value);
Assert.Equal(10, extension.GetObject<ExtensionObject>(new XmlSerializer(typeof(ExtensionObject))).Value);
}
[Fact]
public void Ctor_ReaderNotAtStart_ReturnsExpected()
{
using (XmlReader reader = new XElement("parent", new XElement("ExtensionObject", new XElement("Value", 10))).CreateReader())
{
reader.MoveToElement();
var extension = new SyndicationElementExtension(new XElement("ExtensionObject", new XElement("Value", 10)).CreateReader());
Assert.Equal("ExtensionObject", extension.OuterName);
Assert.Empty(extension.OuterNamespace);
Assert.Equal(0, extension.GetObject<ExtensionObject>().Value);
Assert.Equal(0, extension.GetObject<ExtensionObject>(new DataContractSerializer(typeof(ExtensionObject))).Value);
Assert.Equal(10, extension.GetObject<ExtensionObject>(new XmlSerializer(typeof(ExtensionObject))).Value);
}
}
[Fact]
public void Ctor_EmptyReader_ThrowsXmlException()
{
using (var stringReader = new StringReader(""))
using (var reader = XmlReader.Create(stringReader))
{
Assert.Throws<XmlException>(() => new SyndicationElementExtension(reader));
}
}
[Fact]
public void Ctor_NullReader_ThrowsArgumentNullException()
{
AssertExtensions.Throws<ArgumentNullException>("xmlReader", () => new SyndicationElementExtension(null));
}
[Fact]
public void Ctor_DataContractExtension()
{
var extensionObject = new ExtensionObject { Value = 10 };
// Get OuterName first.
var extension = new SyndicationElementExtension(extensionObject);
Assert.Equal("ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal("http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests", extension.OuterNamespace);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get OuterNamespace first.
extension = new SyndicationElementExtension(extensionObject);
Assert.Equal("http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests", extension.OuterNamespace);
Assert.Equal("ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get Object first.
extension = new SyndicationElementExtension(extensionObject);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
Assert.Equal("ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal("http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests", extension.OuterNamespace);
}
public static IEnumerable<object[]> Ctor_XmlObjectSerializer_TestData()
{
yield return new object[] { null };
yield return new object[] { new DataContractSerializer(typeof(ExtensionObject)) };
}
[Theory]
[MemberData(nameof(Ctor_XmlObjectSerializer_TestData))]
public void Ctor_DataContractExtension_XmlObjectSerializer(XmlObjectSerializer serializer)
{
var extensionObject = new ExtensionObject { Value = 10 };
var extension = new SyndicationElementExtension(extensionObject, serializer);
Assert.Equal("ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal("http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests", extension.OuterNamespace);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
}
[Theory]
[InlineData(null, null)]
[InlineData("outerName", null)]
[InlineData(null, "")]
[InlineData(null, "outerNamespace")]
[InlineData("outerName", "")]
[InlineData("outerName", "outerNamespace")]
public void Ctor_String_String_Object(string outerName, string outerNamespace)
{
var extensionObject = new ExtensionObject { Value = 10 };
// Get OuterName first.
var extension = new SyndicationElementExtension(outerName, outerNamespace, extensionObject);
Assert.Equal(outerName ?? "ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(outerName == null ? "http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests" : outerNamespace, extension.OuterNamespace);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get OuterNamespace first.
extension = new SyndicationElementExtension(outerName, outerNamespace, extensionObject);
Assert.Equal(outerName == null ? "http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests" : outerNamespace, extension.OuterNamespace);
Assert.Equal(outerName ?? "ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get Object first.
extension = new SyndicationElementExtension(outerName, outerNamespace, extensionObject);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
Assert.Equal(outerName ?? "ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(outerName == null ? "http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests" : outerNamespace, extension.OuterNamespace);
}
[Theory]
[InlineData(null, null)]
[InlineData("outerName", null)]
[InlineData(null, "")]
[InlineData(null, "outerNamespace")]
[InlineData("outerName", "")]
[InlineData("outerName", "outerNamespace")]
public void Ctor_String_String_Object_XmlObjectSerializer(string outerName, string outerNamespace)
{
var extensionObject = new ExtensionObject { Value = 10 };
// Get OuterName first.
var extension = new SyndicationElementExtension(outerName, outerNamespace, extensionObject, new DataContractSerializer(typeof(ExtensionObject)));
Assert.Equal(outerName ?? "ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(outerName == null ? "http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests" : outerNamespace, extension.OuterNamespace);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get OuterNamespace first.
extension = new SyndicationElementExtension(outerName, outerNamespace, extensionObject, new DataContractSerializer(typeof(ExtensionObject)));
Assert.Equal(outerName == null ? "http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests" : outerNamespace, extension.OuterNamespace);
Assert.Equal(outerName ?? "ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get Object first.
extension = new SyndicationElementExtension(outerName, outerNamespace, extensionObject, new DataContractSerializer(typeof(ExtensionObject)));
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
Assert.Equal(outerName ?? "ServiceElementExtensionTests.ExtensionObject", extension.OuterName);
Assert.Equal(outerName == null ? "http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests" : outerNamespace, extension.OuterNamespace);
}
[Fact]
public void Ctor_EmptyOuterName_ThrowsArgumentException()
{
var extensionObject = new ExtensionObject { Value = 10 };
AssertExtensions.Throws<ArgumentException>("outerName", null, () => new SyndicationElementExtension("", "outerNamespace", extensionObject));
}
[Fact]
public void Ctor_NullDataContractExtension_ThrowsArgumentNullException()
{
AssertExtensions.Throws<ArgumentNullException>("dataContractExtension", () => new SyndicationElementExtension((object)null));
AssertExtensions.Throws<ArgumentNullException>("dataContractExtension", () => new SyndicationElementExtension(null, new DataContractSerializer(typeof(ExtensionObject))));
AssertExtensions.Throws<ArgumentNullException>("dataContractExtension", () => new SyndicationElementExtension("OuterName", "OuterNamespace", null));
}
public static IEnumerable<object[]> Ctor_XmlContractExtension_TestData()
{
yield return new object[] { null };
yield return new object[] { new XmlSerializer(typeof(ExtensionObject)) };
}
[Theory]
[MemberData(nameof(Ctor_XmlContractExtension_TestData))]
public void Ctor_XmlContractExtension_XmlSerializer(XmlSerializer serializer)
{
var extensionObject = new ExtensionObject { Value = 10 };
// Get OuterName first.
var extension = new SyndicationElementExtension(extensionObject, serializer);
Assert.Equal("ExtensionObject", extension.OuterName);
Assert.Empty(extension.OuterNamespace);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
// Get OuterNamespace first.
extension = new SyndicationElementExtension(extensionObject, serializer);
Assert.Empty(extension.OuterNamespace);
Assert.Equal("ExtensionObject", extension.OuterName);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
Assert.Equal(10, extension.GetObject<ExtensionObject>(new DataContractSerializer(typeof(ExtensionObject))).Value);
Assert.Equal(10, extension.GetObject<ExtensionObject>(new XmlSerializer(typeof(ExtensionObject))).Value);
// Get Object first.
extension = new SyndicationElementExtension(extensionObject, serializer);
Assert.Equal(10, extension.GetObject<ExtensionObject>().Value);
Assert.Equal("ExtensionObject", extension.OuterName);
Assert.Empty(extension.OuterNamespace);
}
[Fact]
public void Ctor_NullXmlContractExtension_ThrowsArgumentNullException()
{
AssertExtensions.Throws<ArgumentNullException>("xmlSerializerExtension", () => new SyndicationElementExtension(null, new XmlSerializer(typeof(ExtensionObject))));
}
[Fact]
public void GetReader_WithReader_ReturnsExpected()
{
var extension = new SyndicationElementExtension(new XElement("ExtensionObject", new XElement("Value", 10)).CreateReader());
XmlReader reader = extension.GetReader();
Assert.Equal(@"<ExtensionObject><Value>10</Value></ExtensionObject>", reader.ReadOuterXml());
}
[Fact]
public void GetReader_ObjectWithXmlObjectSerializer_ReturnsExpected()
{
var extensionObject = new ExtensionObject() { Value = 10 };
var extension = new SyndicationElementExtension(extensionObject, new DataContractSerializer(typeof(ExtensionObject)));
XmlReader reader = extension.GetReader();
Assert.Equal(@"<ServiceElementExtensionTests.ExtensionObject xmlns=""http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests"" xmlns:i=""http://www.w3.org/2001/XMLSchema-instance""><Value>10</Value></ServiceElementExtensionTests.ExtensionObject>", reader.ReadOuterXml());
}
[Fact]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework)]
public void GetReader_ObjectWithXmlSerializer_ReturnsExpected()
{
var extensionObject = new ExtensionObject() { Value = 10 };
var extension = new SyndicationElementExtension(extensionObject, new XmlSerializer(typeof(ExtensionObject)));
XmlReader reader = extension.GetReader();
Assert.Equal(@"<ExtensionObject xmlns:xsi=""http://www.w3.org/2001/XMLSchema-instance"" xmlns:xsd=""http://www.w3.org/2001/XMLSchema""><Value>10</Value></ExtensionObject>", reader.ReadOuterXml());
}
[Fact]
public void GetObject_NullXmlSerializer_ThrowsArgumentNullException()
{
var extension = new SyndicationElementExtension(new ExtensionObject());
Assert.Throws<ArgumentNullException>("serializer", () => extension.GetObject<ExtensionObject>((XmlSerializer)null));
}
[Fact]
public void GetObject_NullXmlObjectSerializer_ThrowsArgumentNullException()
{
var extension = new SyndicationElementExtension(new ExtensionObject());
Assert.Throws<ArgumentNullException>("serializer", () => extension.GetObject<ExtensionObject>((XmlObjectSerializer)null));
}
[Fact]
public void WriteTo_WithReader_ReturnsExpected()
{
var extension = new SyndicationElementExtension(new XElement("ExtensionObject", new XElement("Value", 10)).CreateReader());
using (var stringWriter = new StringWriter())
{
using (var writer = new XmlTextWriter(stringWriter))
{
extension.WriteTo(writer);
}
Assert.Equal(@"<ExtensionObject><Value>10</Value></ExtensionObject>", stringWriter.ToString());
}
}
[Fact]
public void WriteTo_ObjectWithXmlObjectSerializer_ReturnsExpected()
{
var extensionObject = new ExtensionObject { Value = 10 };
var extension = new SyndicationElementExtension(extensionObject, new DataContractSerializer(typeof(ExtensionObject)));
using (var stringWriter = new StringWriter())
{
using (var writer = new XmlTextWriter(stringWriter))
{
extension.WriteTo(writer);
}
Assert.Equal(@"<ServiceElementExtensionTests.ExtensionObject xmlns:i=""http://www.w3.org/2001/XMLSchema-instance"" xmlns=""http://schemas.datacontract.org/2004/07/System.ServiceModel.Syndication.Tests""><Value>10</Value></ServiceElementExtensionTests.ExtensionObject>", stringWriter.ToString());
}
}
[Fact]
[SkipOnTargetFramework(TargetFrameworkMonikers.NetFramework)]
public void WriteTo_ObjectWithXmlSerializer_ReturnsExpected()
{
var extensionObject = new ExtensionObject { Value = 10 };
var extension = new SyndicationElementExtension(extensionObject, new XmlSerializer(typeof(ExtensionObject)));
using (var stringWriter = new StringWriter())
{
using (var writer = new XmlTextWriter(stringWriter))
{
extension.WriteTo(writer);
}
Assert.Equal(@"<?xml version=""1.0"" encoding=""utf-16""?><ExtensionObject xmlns:xsi=""http://www.w3.org/2001/XMLSchema-instance"" xmlns:xsd=""http://www.w3.org/2001/XMLSchema""><Value>10</Value></ExtensionObject>", stringWriter.ToString());
}
}
[Fact]
public void WriteTo_NullWriter_ThrowsArgumentNullException()
{
var extension = new SyndicationElementExtension(new ExtensionObject());
Assert.Throws<ArgumentNullException>("writer", () => extension.WriteTo(null));
}
[Fact]
public void ISerializableIsAny_XmlObjectSerializer_OuterNameReturnsExpected()
{
var extensionObject = new XmlSerializabWithIsAnyNull();
var extension = new SyndicationElementExtension(extensionObject, new DataContractSerializer(typeof(XmlSerializabWithIsAnyNull)));
Assert.Equal("name", extension.OuterName);
Assert.Empty(extension.OuterNamespace);
Assert.NotNull(extension.GetObject<XmlSerializabWithIsAnyNull>(new DataContractSerializer(typeof(XmlSerializabWithIsAnyNull))));
}
[Fact]
public void ISerializableIsAny_XmlSerializer_OuterNameReturnsExpected()
{
var extensionObject = new XmlSerializabWithIsAny();
var extension = new SyndicationElementExtension(extensionObject, new XmlSerializer(typeof(XmlSerializabWithIsAny)));
Assert.Equal("name", extension.OuterName);
Assert.Empty(extension.OuterNamespace);
Assert.NotNull(extension.GetObject<XmlSerializabWithIsAny>(new XmlSerializer(typeof(XmlSerializabWithIsAny))));
}
[DataContract]
public class ExtensionObject
{
[DataMember]
public int Value { get; set; }
}
[XmlSchemaProvider("GetXsdType", IsAny = true)]
public class XmlSerializabWithIsAny : IXmlSerializable
{
public static XmlQualifiedName GetXsdType(XmlSchemaSet schemaSet) => new XmlQualifiedName("string", XmlSchema.Namespace);
public XmlSchema GetSchema() => null;
public void ReadXml(XmlReader reader) { }
public void WriteXml(XmlWriter writer)
{
writer.WriteElementString("name", "value");
}
}
[XmlSchemaProvider("GetXsdType", IsAny = true)]
public class XmlSerializabWithIsAnyNull : IXmlSerializable
{
public static XmlQualifiedName GetXsdType(XmlSchemaSet schemaSet) => null;
public XmlSchema GetSchema() => null;
public void ReadXml(XmlReader reader) { }
public void WriteXml(XmlWriter writer)
{
writer.WriteElementString("name", "value");
}
}
}
}
| -1 |
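For readers outside the JIT, the `IND(struct ADDR(LCL))` shape referred to throughout the PR 66251 description above corresponds roughly to C# that reads a struct local through its address. A minimal sketch, assuming the standard `System.Runtime.CompilerServices.Unsafe` API; the exact trees the importer and morpher build depend on the compiler version:

```csharp
using System;
using System.Runtime.CompilerServices;

internal struct Pair
{
    public long A;
    public long B;
}

internal static class Program
{
    [MethodImpl(MethodImplOptions.NoInlining)]
    private static Pair ReinterpretLocal()
    {
        decimal d = 123.45m; // 16-byte struct local
        // Reading a different struct type through the local's address is
        // roughly the IND(struct ADDR(LCL)) pattern that the morpher used
        // to fold into a layout-less TYP_STRUCT LCL_FLD location.
        return Unsafe.As<decimal, Pair>(ref d);
    }

    private static void Main() => Console.WriteLine(ReinterpretLocal().A);
}
```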
dotnet/runtime | 66,251 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes | Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; at that level we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | SingleAccretion | 2022-03-05T21:01:45Z | 2022-03-30T04:48:39Z | cbf3f9c43649035f59e626792f82e7b579c44865 | 2453f16807b85b279efc26d17d6f20de87801c09 | Do not generate `TYP_STRUCT` `LCL_FLD` nodes. Today, the morpher can fold `IND(struct ADDR(LCL))` into `LCL_FLD` `TYP_STRUCT` layout-less nodes, these end up being locations, always (well -- there is one case where that's not true, such a node can end up under a return of a small struct later to be retyped by lowering, but this is rare).
These will be a significant impediment to actual, proper `TYP_STRUCT` local field nodes, as it would mean handling the case, everywhere in code, where the layout would be missing, that we don't want to allow in the first place.
This change fixes that by introducing, at the address level, the folding that the creation of these location nodes achieves; at that level we can choose the type of the location node arbitrarily and freely.
Along the way, `IsLocalAddrExpr` was fixed to not forget to look at zero-offset sequences attached to `ADDR`s, to avoid regressions.
We are expecting some positive diffs here, from the `IsLocalAddrExpr` change (enabling more precise VNs) and the folding change itself (the old code path would, ever so rarely, miss out on some things).
We are also expecting one small regression due to call retyping of reg-sized `IND struct(ADDR(LCL))` nodes losing the field sequence. I did not fix it because a) it was small, and rare, b) I do not want to permeate the zero-offset code any more than is strictly required.
[Diffs](https://dev.azure.com/dnceng/public/_build/results?buildId=1653792&view=results). | ./src/tests/JIT/Directed/refbyref/refpinned2iu.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern System.Console
{
.publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
.ver 4:0:0:0
}
.assembly extern legacy library mscorlib {}
.assembly ref2iu {}
.method public static int32 Main()
{
.entrypoint
.maxstack 10
.locals init (object pinned V_0,
int8 V_1,
uint8 V_2,
int16 V_3,
uint16 V_4,
int32 V_5,
uint32 V_6,
int64 V_7,
uint64 V_8,
native int V_9,
native unsigned int V_10,
uint64 V_11)
ldc.i4 1
newarr object
stloc.0
ldloc.0
conv.i1
stloc V_1
ldloc.0
conv.u1
call void [mscorlib]System.GC::Collect()
stloc V_2
ldloc.0
conv.i2
stloc V_3
ldloc.0
conv.u2
stloc V_4
ldloc.0
conv.i4
stloc V_5
ldloc.0
conv.u4
stloc V_6
ldloc.0
conv.i8
stloc V_7
call void [mscorlib]System.GC::Collect()
ldloc.0
conv.u8
stloc V_8
ldloc.0
conv.i
stloc V_9
ldloc.0
call void [mscorlib]System.GC::Collect()
conv.u
call void [mscorlib]System.GC::Collect()
call void [mscorlib]System.GC::Collect()
call void [mscorlib]System.GC::Collect()
call void [mscorlib]System.GC::Collect()
call void [mscorlib]System.GC::Collect()
stloc V_10
ldc.i4 0x8000000
call void [mscorlib]System.GC::Collect()
conv.u8
ldloc V_1
conv.u8
add
ldloc V_2
conv.u8
add
ldloc V_3
conv.u8
add
ldloc V_4
conv.u8
add
ldloc V_5
conv.u8
add
ldloc V_6
conv.u8
add
ldloc V_7
conv.u8
add
ldloc V_8
conv.u8
add
ldloc V_9
conv.u8
add
ldloc V_10
conv.u8
add
stloc V_11
ldloc V_11
call void [System.Console]System.Console::WriteLine(uint64)
ldstr "PASSED"
call void [System.Console]System.Console::WriteLine(string)
ldc.i4 100
ret
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/Common/src/System/Net/HttpValidationHelpers.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Net
{
internal static class HttpValidationHelpers
{
internal static string CheckBadHeaderNameChars(string name)
{
// First, check for absence of separators and spaces.
if (IsInvalidMethodOrHeaderString(name))
{
throw new ArgumentException(SR.net_WebHeaderInvalidHeaderChars, nameof(name));
}
// Second, check for non CTL ASCII-7 characters (32-126).
if (ContainsNonAsciiChars(name))
{
throw new ArgumentException(SR.net_WebHeaderInvalidHeaderChars, nameof(name));
}
return name;
}
internal static bool ContainsNonAsciiChars(string token)
{
for (int i = 0; i < token.Length; ++i)
{
if ((token[i] < 0x20) || (token[i] > 0x7e))
{
return true;
}
}
return false;
}
internal static bool IsValidToken(string token)
{
return (token.Length > 0)
&& !IsInvalidMethodOrHeaderString(token)
&& !ContainsNonAsciiChars(token);
}
private static readonly char[] s_httpTrimCharacters = new char[] { (char)0x09, (char)0xA, (char)0xB, (char)0xC, (char)0xD, (char)0x20 };
/// <summary>
/// Throws on invalid header value chars.
/// </summary>
public static string CheckBadHeaderValueChars(string? value)
{
if (string.IsNullOrEmpty(value))
{
// empty value is OK.
return string.Empty;
}
// Trim spaces from both ends.
value = value.Trim(s_httpTrimCharacters);
// First, check for correctly formed multi-line value.
// Second, check for absence of CTL characters.
int crlf = 0;
for (int i = 0; i < value.Length; ++i)
{
char c = (char)(0x000000ff & (uint)value[i]);
switch (crlf)
{
case 0:
if (c == '\r')
{
crlf = 1;
}
else if (c == '\n')
{
// Technically this is bad HTTP, but we want to be permissive in what we accept.
// It is important to note that it would be a breaking change to reject this.
crlf = 2;
}
else if (c == 127 || (c < ' ' && c != '\t'))
{
throw new ArgumentException(SR.net_WebHeaderInvalidControlChars, nameof(value));
}
break;
case 1:
if (c == '\n')
{
crlf = 2;
break;
}
throw new ArgumentException(SR.net_WebHeaderInvalidCRLFChars, nameof(value));
case 2:
if (c == ' ' || c == '\t')
{
crlf = 0;
break;
}
throw new ArgumentException(SR.net_WebHeaderInvalidControlChars, nameof(value));
}
}
if (crlf != 0)
{
throw new ArgumentException(SR.net_WebHeaderInvalidCRLFChars, nameof(value));
}
return value;
}
// Returns true if stringValue contains characters that cannot appear
// in a valid method-verb or HTTP header.
public static bool IsInvalidMethodOrHeaderString(string stringValue)
{
for (int i = 0; i < stringValue.Length; i++)
{
switch (stringValue[i])
{
case '(':
case ')':
case '<':
case '>':
case '@':
case ',':
case ';':
case ':':
case '\\':
case '"':
case '\'':
case '/':
case '[':
case ']':
case '?':
case '=':
case '{':
case '}':
case ' ':
case '\t':
case '\r':
case '\n':
return true;
default:
break;
}
}
return false;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Net
{
internal static class HttpValidationHelpers
{
internal static string CheckBadHeaderNameChars(string name)
{
// First, check for absence of separators and spaces.
if (IsInvalidMethodOrHeaderString(name))
{
throw new ArgumentException(string.Format(SR.net_WebHeaderInvalidHeaderChars, name), nameof(name));
}
// Second, check for non CTL ASCII-7 characters (32-126).
if (ContainsNonAsciiChars(name))
{
throw new ArgumentException(string.Format(SR.net_WebHeaderInvalidHeaderChars, name), nameof(name));
}
return name;
}
internal static bool ContainsNonAsciiChars(string token)
{
for (int i = 0; i < token.Length; ++i)
{
if ((token[i] < 0x20) || (token[i] > 0x7e))
{
return true;
}
}
return false;
}
internal static bool IsValidToken(string token)
{
return (token.Length > 0)
&& !IsInvalidMethodOrHeaderString(token)
&& !ContainsNonAsciiChars(token);
}
private static readonly char[] s_httpTrimCharacters = new char[] { (char)0x09, (char)0xA, (char)0xB, (char)0xC, (char)0xD, (char)0x20 };
/// <summary>
/// Throws on invalid header value chars.
/// </summary>
public static string CheckBadHeaderValueChars(string? value)
{
if (string.IsNullOrEmpty(value))
{
// empty value is OK.
return string.Empty;
}
// Trim spaces from both ends.
value = value.Trim(s_httpTrimCharacters);
// First, check for correctly formed multi-line value.
// Second, check for absence of CTL characters.
int crlf = 0;
for (int i = 0; i < value.Length; ++i)
{
char c = (char)(0x000000ff & (uint)value[i]);
switch (crlf)
{
case 0:
if (c == '\r')
{
crlf = 1;
}
else if (c == '\n')
{
// Technically this is bad HTTP, but we want to be permissive in what we accept.
// It is important to note that it would be a breaking change to reject this.
crlf = 2;
}
else if (c == 127 || (c < ' ' && c != '\t'))
{
throw new ArgumentException(SR.net_WebHeaderInvalidControlChars, nameof(value));
}
break;
case 1:
if (c == '\n')
{
crlf = 2;
break;
}
throw new ArgumentException(SR.net_WebHeaderInvalidCRLFChars, nameof(value));
case 2:
if (c == ' ' || c == '\t')
{
crlf = 0;
break;
}
throw new ArgumentException(SR.net_WebHeaderInvalidControlChars, nameof(value));
}
}
if (crlf != 0)
{
throw new ArgumentException(SR.net_WebHeaderInvalidCRLFChars, nameof(value));
}
return value;
}
// Returns true if stringValue contains characters that cannot appear
// in a valid method-verb or HTTP header.
public static bool IsInvalidMethodOrHeaderString(string stringValue)
{
for (int i = 0; i < stringValue.Length; i++)
{
switch (stringValue[i])
{
case '(':
case ')':
case '<':
case '>':
case '@':
case ',':
case ';':
case ':':
case '\\':
case '"':
case '\'':
case '/':
case '[':
case ']':
case '?':
case '=':
case '{':
case '}':
case ' ':
case '\t':
case '\r':
case '\n':
return true;
default:
break;
}
}
return false;
}
}
}
| 1 |
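The user-visible effect of the `HttpValidationHelpers` change above can be sketched through `WebHeaderCollection`, assuming it is one of the consumers of `CheckBadHeaderNameChars`; the exact wording still comes from the `net_WebHeaderInvalidHeaderChars` resource string, so the comments below are only indicative:

```csharp
using System;
using System.Net;

internal static class Program
{
    private static void Main()
    {
        var headers = new WebHeaderCollection();
        try
        {
            // The space is one of the separator characters rejected by
            // HttpValidationHelpers.CheckBadHeaderNameChars.
            headers.Add("Bad Header", "value");
        }
        catch (ArgumentException ex)
        {
            // Before the change the message only said the header contained
            // invalid characters; after it, the offending name ("Bad Header")
            // is included via string.Format.
            Console.WriteLine(ex.Message);
        }
    }
}
```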
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Net.Http/src/Resources/Strings.resx | <?xml version="1.0" encoding="utf-8"?>
<root>
<!--
Microsoft ResX Schema
Version 2.0
The primary goals of this format is to allow a simple XML format
that is mostly human readable. The generation and parsing of the
various data types are done through the TypeConverter classes
associated with the data types.
Example:
... ado.net/XML headers & schema ...
<resheader name="resmimetype">text/microsoft-resx</resheader>
<resheader name="version">2.0</resheader>
<resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
<resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
<data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
<data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
<data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
<value>[base64 mime encoded serialized .NET Framework object]</value>
</data>
<data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
<value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
<comment>This is a comment</comment>
</data>
There are any number of "resheader" rows that contain simple
name/value pairs.
Each data row contains a name, and value. The row also contains a
type or mimetype. Type corresponds to a .NET class that support
text/value conversion through the TypeConverter architecture.
Classes that don't support this are serialized and stored with the
mimetype set.
The mimetype is used for serialized objects, and tells the
ResXResourceReader how to depersist the object. This is currently not
extensible. For a given mimetype the value must be set accordingly:
Note - application/x-microsoft.net.object.binary.base64 is the format
that the ResXResourceWriter will generate, however the reader can
read any of the formats listed below.
mimetype: application/x-microsoft.net.object.binary.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.soap.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Soap.SoapFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.bytearray.base64
value : The object must be serialized into a byte array
: using a System.ComponentModel.TypeConverter
: and then encoded with base64 encoding.
-->
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="net_securityprotocolnotsupported" xml:space="preserve">
<value>The requested security protocol is not supported.</value>
</data>
<data name="net_http_httpmethod_format_error" xml:space="preserve">
<value>The format of the HTTP method is invalid.</value>
</data>
<data name="net_http_reasonphrase_format_error" xml:space="preserve">
<value>The reason phrase must not contain new-line characters.</value>
</data>
<data name="net_http_copyto_array_too_small" xml:space="preserve">
<value>The number of elements is greater than the available space from arrayIndex to the end of the destination array.</value>
</data>
<data name="net_http_headers_not_found" xml:space="preserve">
<value>The given header was not found.</value>
</data>
<data name="net_http_headers_single_value_header" xml:space="preserve">
<value>Cannot add value because header '{0}' does not support multiple values.</value>
</data>
<data name="net_http_headers_invalid_header_name" xml:space="preserve">
<value>The header name format is invalid.</value>
</data>
<data name="net_http_headers_invalid_value" xml:space="preserve">
<value>The format of value '{0}' is invalid.</value>
</data>
<data name="net_http_headers_not_allowed_header_name" xml:space="preserve">
<value>Misused header name, '{0}'. Make sure request headers are used with HttpRequestMessage, response headers with HttpResponseMessage, and content headers with HttpContent objects.</value>
</data>
<data name="net_http_headers_invalid_host_header" xml:space="preserve">
<value>The specified value is not a valid 'Host' header string.</value>
</data>
<data name="net_http_headers_invalid_etag_name" xml:space="preserve">
<value>The specified value is not a valid quoted string.</value>
</data>
<data name="net_http_headers_invalid_range" xml:space="preserve">
<value>Invalid range. At least one of the two parameters must not be null.</value>
</data>
<data name="net_http_headers_no_newlines" xml:space="preserve">
<value>New-line characters are not allowed in header values.</value>
</data>
<data name="net_http_content_buffersize_exceeded" xml:space="preserve">
<value>Cannot write more bytes to the buffer than the configured maximum buffer size: {0}.</value>
</data>
<data name="net_http_content_no_task_returned" xml:space="preserve">
<value>The async operation did not return a System.Threading.Tasks.Task object.</value>
</data>
<data name="net_http_content_stream_already_read" xml:space="preserve">
<value>The stream was already consumed. It cannot be read again.</value>
</data>
<data name="net_http_content_readonly_stream" xml:space="preserve">
<value>The stream does not support writing.</value>
</data>
<data name="net_http_content_invalid_charset" xml:space="preserve">
<value>The character set provided in ContentType is invalid. Cannot read content as string using an invalid character set.</value>
</data>
<data name="net_http_content_stream_copy_error" xml:space="preserve">
<value>Error while copying content to a stream.</value>
</data>
<data name="net_http_content_read_as_stream_has_task" xml:space="preserve">
<value>The content's stream has already been retrieved via async ReadAsStreamAsync and cannot be subsequently accessed synchronously.</value>
</data>
<data name="net_http_argument_empty_string" xml:space="preserve">
<value>The value cannot be null or empty.</value>
</data>
<data name="net_http_client_request_already_sent" xml:space="preserve">
<value>The request message was already sent. Cannot send the same request message multiple times.</value>
</data>
<data name="net_http_operation_started" xml:space="preserve">
<value>This instance has already started one or more requests. Properties can only be modified before sending the first request.</value>
</data>
<data name="net_http_client_execution_error" xml:space="preserve">
<value>An error occurred while sending the request.</value>
</data>
<data name="net_http_client_absolute_baseaddress_required" xml:space="preserve">
<value>The base address must be an absolute URI.</value>
</data>
<data name="net_http_client_invalid_requesturi" xml:space="preserve">
<value>An invalid request URI was provided. Either the request URI must be an absolute URI or BaseAddress must be set.</value>
</data>
<data name="net_http_unsupported_requesturi_scheme" xml:space="preserve">
<value>The '{0}' scheme is not supported.</value>
</data>
<data name="net_http_parser_invalid_base64_string" xml:space="preserve">
<value>Value '{0}' is not a valid Base64 string. Error: {1}</value>
</data>
<data name="net_http_handler_noresponse" xml:space="preserve">
<value>Handler did not return a response message.</value>
</data>
<data name="net_http_handler_norequest" xml:space="preserve">
<value>A request message must be provided. It cannot be null.</value>
</data>
<data name="net_http_message_not_success_statuscode" xml:space="preserve">
<value>Response status code does not indicate success: {0} ({1}).</value>
</data>
<data name="net_http_content_field_too_long" xml:space="preserve">
<value>The field cannot be longer than {0} characters.</value>
</data>
<data name="net_http_log_headers_no_newlines" xml:space="preserve">
<value>Value for header '{0}' contains new-line characters. Value: '{1}'.</value>
</data>
<data name="net_http_log_headers_invalid_quality" xml:space="preserve">
<value>The 'q' value is invalid: '{0}'.</value>
</data>
<data name="net_http_handler_not_assigned" xml:space="preserve">
<value>The inner handler has not been assigned.</value>
</data>
<data name="net_http_invalid_enable_first" xml:space="preserve">
<value>The {0} property must be set to '{1}' to use this property.</value>
</data>
<data name="net_http_content_buffersize_limit" xml:space="preserve">
<value>Buffering more than {0} bytes is not supported.</value>
</data>
<data name="net_http_io_read" xml:space="preserve">
<value>The read operation failed, see inner exception.</value>
</data>
<data name="net_http_io_read_incomplete" xml:space="preserve">
<value>Unable to read data from the transport connection. The connection was closed before all data could be read. Expected {0} bytes, read {1} bytes.</value>
</data>
<data name="net_http_io_write" xml:space="preserve">
<value>The write operation failed, see inner exception.</value>
</data>
<data name="net_http_chunked_not_allowed_with_empty_content" xml:space="preserve">
<value>'Transfer-Encoding: chunked' header can not be used when content object is not specified.</value>
</data>
<data name="net_http_invalid_cookiecontainer" xml:space="preserve">
<value>When using CookieUsePolicy.UseSpecifiedCookieContainer, the CookieContainer property must not be null.</value>
</data>
<data name="net_http_invalid_proxyusepolicy" xml:space="preserve">
<value>When using a non-null Proxy, the WindowsProxyUsePolicy property must be set to WindowsProxyUsePolicy.UseCustomProxy.</value>
</data>
<data name="net_http_invalid_proxy" xml:space="preserve">
<value>When using WindowsProxyUsePolicy.UseCustomProxy, the Proxy property must not be null.</value>
</data>
<data name="net_http_value_must_be_greater_than" xml:space="preserve">
<value>The specified value must be greater than {0}.</value>
</data>
<data name="net_http_value_must_be_greater_than_or_equal" xml:space="preserve">
<value>The specified value '{0}' must be greater than or equal to '{1}'.</value>
</data>
<data name="net_cookie_attribute" xml:space="preserve">
<value>The '{0}'='{1}' part of the cookie is invalid.</value>
</data>
<data name="ArgumentOutOfRange_FileLengthTooBig" xml:space="preserve">
<value>Specified file length was too large for the file system.</value>
</data>
<data name="IO_FileExists_Name" xml:space="preserve">
<value>The file '{0}' already exists.</value>
</data>
<data name="IO_FileNotFound" xml:space="preserve">
<value>Unable to find the specified file.</value>
</data>
<data name="IO_FileNotFound_FileName" xml:space="preserve">
<value>Could not find file '{0}'.</value>
</data>
<data name="IO_PathNotFound_NoPathName" xml:space="preserve">
<value>Could not find a part of the path.</value>
</data>
<data name="IO_PathNotFound_Path" xml:space="preserve">
<value>Could not find a part of the path '{0}'.</value>
</data>
<data name="IO_PathTooLong" xml:space="preserve">
<value>The specified file name or path is too long, or a component of the specified path is too long.</value>
</data>
<data name="IO_SharingViolation_File" xml:space="preserve">
<value>The process cannot access the file '{0}' because it is being used by another process.</value>
</data>
<data name="IO_SharingViolation_NoFileName" xml:space="preserve">
<value>The process cannot access the file because it is being used by another process.</value>
</data>
<data name="UnauthorizedAccess_IODenied_NoPathName" xml:space="preserve">
<value>Access to the path is denied.</value>
</data>
<data name="UnauthorizedAccess_IODenied_Path" xml:space="preserve">
<value>Access to the path '{0}' is denied.</value>
</data>
<data name="net_http_username_empty_string" xml:space="preserve">
<value>The username for a credential object cannot be null or empty.</value>
</data>
<data name="net_http_no_concurrent_io_allowed" xml:space="preserve">
<value>The stream does not support concurrent I/O read or write operations.</value>
</data>
<data name="net_http_invalid_response" xml:space="preserve">
<value>The server returned an invalid or unrecognized response.</value>
</data>
<data name="net_http_request_content_length_mismatch" xml:space="preserve">
<value>Sent {0} request content bytes, but Content-Length promised {1}.</value>
</data>
<data name="net_http_invalid_response_premature_eof" xml:space="preserve">
<value>The response ended prematurely.</value>
</data>
<data name="net_http_invalid_response_missing_frame" xml:space="preserve">
<value>The response ended prematurely while waiting for the next frame from the server.</value>
</data>
<data name="net_http_invalid_response_premature_eof_bytecount" xml:space="preserve">
<value>The response ended prematurely, with at least {0} additional bytes expected.</value>
</data>
<data name="net_http_invalid_response_chunk_header_invalid" xml:space="preserve">
<value>Received chunk header length could not be parsed: '{0}'.</value>
</data>
<data name="net_http_invalid_response_chunk_extension_invalid" xml:space="preserve">
<value>Received an invalid chunk extension: '{0}'.</value>
</data>
<data name="net_http_invalid_response_chunk_terminator_invalid" xml:space="preserve">
<value>Received an invalid chunk terminator: '{0}'.</value>
</data>
<data name="net_http_invalid_response_status_line" xml:space="preserve">
<value>Received an invalid status line: '{0}'.</value>
</data>
<data name="net_http_invalid_response_status_code" xml:space="preserve">
<value>Received an invalid status code: '{0}'.</value>
</data>
<data name="net_http_invalid_response_status_reason" xml:space="preserve">
<value>Received status phrase could not be decoded with iso-8859-1: '{0}'.</value>
</data>
<data name="net_http_invalid_response_multiple_status_codes" xml:space="preserve">
<value>The response contained more than one status code.</value>
</data>
<data name="net_http_invalid_response_header_folder" xml:space="preserve">
<value>Received an invalid folded header.</value>
</data>
<data name="net_http_invalid_response_header_line" xml:space="preserve">
<value>Received an invalid header line: '{0}'.</value>
</data>
<data name="net_http_invalid_response_header_name" xml:space="preserve">
<value>Received an invalid header name: '{0}'.</value>
</data>
<data name="net_http_request_aborted" xml:space="preserve">
<value>The request was aborted.</value>
</data>
<data name="net_http_invalid_response_pseudo_header_in_trailer" xml:space="preserve">
<value>Received an HTTP/2 pseudo-header as a trailing header.</value>
</data>
<data name="net_http_buffer_insufficient_length" xml:space="preserve">
<value>The buffer was not long enough.</value>
</data>
<data name="net_http_response_headers_exceeded_length" xml:space="preserve">
<value>The HTTP response headers length exceeded the set limit of {0} bytes.</value>
</data>
<data name="ArgumentOutOfRange_NeedNonNegativeNum" xml:space="preserve">
<value>Non-negative number required.</value>
</data>
<data name="ArgumentOutOfRange_NeedPosNum" xml:space="preserve">
<value>Positive number required.</value>
</data>
<data name="NotSupported_UnreadableStream" xml:space="preserve">
<value>Stream does not support reading.</value>
</data>
<data name="NotSupported_UnwritableStream" xml:space="preserve">
<value>Stream does not support writing.</value>
</data>
<data name="ObjectDisposed_StreamClosed" xml:space="preserve">
<value>Cannot access a closed stream.</value>
</data>
<data name="net_http_invalid_proxy_scheme" xml:space="preserve">
<value>Only the 'http', 'socks4', 'socks4a' and 'socks5' schemes are allowed for proxies.</value>
</data>
<data name="net_http_request_invalid_char_encoding" xml:space="preserve">
<value>Request headers must contain only ASCII characters.</value>
</data>
<data name="net_http_ssl_connection_failed" xml:space="preserve">
<value>The SSL connection could not be established, see inner exception.</value>
</data>
<data name="net_http_unsupported_chunking" xml:space="preserve">
<value>HTTP 1.0 does not support chunking.</value>
</data>
<data name="net_http_unsupported_version" xml:space="preserve">
<value>Request HttpVersion 0.X is not supported. Use 1.0 or above.</value>
</data>
<data name="IO_SeekBeforeBegin" xml:space="preserve">
<value>An attempt was made to move the position before the beginning of the stream.</value>
</data>
<data name="net_ssl_app_protocols_invalid" xml:space="preserve">
<value>The application protocol list is invalid.</value>
</data>
<data name="net_ssl_http2_requires_tls12" xml:space="preserve">
<value>HTTP/2 requires TLS 1.2 or newer, but '{0}' was negotiated.</value>
</data>
<data name="IO_PathTooLong_Path" xml:space="preserve">
<value>The path '{0}' is too long, or a component of the specified path is too long.</value>
</data>
<data name="net_http_request_no_host" xml:space="preserve">
<value>CONNECT request must contain Host header.</value>
</data>
<data name="net_http_winhttp_error" xml:space="preserve">
<value>Error {0} calling {1}, '{2}'.</value>
</data>
<data name="net_http_http2_connection_error" xml:space="preserve">
<value>The HTTP/2 server sent invalid data on the connection. HTTP/2 error code '{0}' (0x{1}).</value>
</data>
<data name="net_http_http2_stream_error" xml:space="preserve">
<value>The HTTP/2 server reset the stream. HTTP/2 error code '{0}' (0x{1}).</value>
</data>
<data name="net_http_http2_connection_not_established" xml:space="preserve">
<value>An HTTP/2 connection could not be established because the server did not complete the HTTP/2 handshake.</value>
</data>
<data name="net_http_http2_invalidinitialstreamwindowsize" xml:space="preserve">
<value>The initial HTTP/2 stream window size must be between {0} and {1}.</value>
</data>
<data name="net_MethodNotImplementedException" xml:space="preserve">
<value>This method is not implemented by this class.</value>
</data>
<data name="event_OperationReturnedSomething" xml:space="preserve">
<value>{0} returned {1}.</value>
</data>
<data name="net_log_operation_failed_with_error" xml:space="preserve">
<value>{0} failed with error {1}.</value>
</data>
<data name="net_completed_result" xml:space="preserve">
<value>This operation cannot be performed on a completed asynchronous result object.</value>
</data>
<data name="net_invalid_enum" xml:space="preserve">
<value>The specified value is not valid in the '{0}' enumeration.</value>
</data>
<data name="net_auth_message_not_encrypted" xml:space="preserve">
<value>Protocol error: A received message contains a valid signature but it was not encrypted as required by the effective Protection Level.</value>
</data>
<data name="net_securitypackagesupport" xml:space="preserve">
<value>The requested security package is not supported.</value>
</data>
<data name="SSPIInvalidHandleType" xml:space="preserve">
<value>'{0}' is not a supported handle type.</value>
</data>
<data name="net_http_authconnectionfailure" xml:space="preserve">
<value>Authentication failed because the connection could not be reused.</value>
</data>
<data name="net_nego_server_not_supported" xml:space="preserve">
<value>Server implementation is not supported</value>
</data>
<data name="net_nego_protection_level_not_supported" xml:space="preserve">
<value>Requested protection level is not supported with the GSSAPI implementation currently installed.</value>
</data>
<data name="net_context_buffer_too_small" xml:space="preserve">
<value>Insufficient buffer space. Required: {0} Actual: {1}.</value>
</data>
<data name="net_gssapi_operation_failed_detailed" xml:space="preserve">
<value>GSSAPI operation failed with error - {0} ({1}).</value>
</data>
<data name="net_gssapi_operation_failed" xml:space="preserve">
<value>GSSAPI operation failed with status: {0} (Minor status: {1}).</value>
</data>
<data name="net_gssapi_operation_failed_detailed_majoronly" xml:space="preserve">
<value>GSSAPI operation failed with error - {0}.</value>
</data>
<data name="net_gssapi_operation_failed_majoronly" xml:space="preserve">
<value>GSSAPI operation failed with status: {0}.</value>
</data>
<data name="net_gssapi_ntlm_missing_plugin" xml:space="preserve">
<value>NTLM authentication requires the GSSAPI plugin 'gss-ntlmssp'.</value>
</data>
<data name="net_ntlm_not_possible_default_cred" xml:space="preserve">
<value>NTLM authentication is not possible with default credentials on this platform.</value>
</data>
<data name="net_nego_not_supported_empty_target_with_defaultcreds" xml:space="preserve">
<value>Target name should be non empty if default credentials are passed.</value>
</data>
<data name="net_http_hpack_huffman_decode_failed" xml:space="preserve">
<value>Huffman-coded literal string failed to decode.</value>
</data>
<data name="net_http_hpack_incomplete_header_block" xml:space="preserve">
<value>Incomplete header block received.</value>
</data>
<data name="net_http_hpack_late_dynamic_table_size_update" xml:space="preserve">
<value>Dynamic table size update received after beginning of header block.</value>
</data>
<data name="net_http_hpack_bad_integer" xml:space="preserve">
<value>HPACK integer exceeds limits or has an overlong encoding.</value>
</data>
<data name="net_http_disposed_while_in_use" xml:space="preserve">
<value>The object was disposed while operations were in progress.</value>
</data>
<data name="net_http_hpack_large_table_size_update" xml:space="preserve">
<value>Dynamic table size update to {0} bytes exceeds limit of {1} bytes.</value>
</data>
<data name="net_http_server_shutdown" xml:space="preserve">
<value>The server shut down the connection.</value>
</data>
<data name="net_http_hpack_invalid_index" xml:space="preserve">
<value>Invalid header index: {0} is outside of static table and no dynamic table entry found.</value>
</data>
<data name="net_http_hpack_unexpected_end" xml:space="preserve">
<value>End of headers reached with incomplete token.</value>
</data>
<data name="net_http_headers_exceeded_length" xml:space="preserve">
<value>The HTTP headers length exceeded the set limit of {0} bytes.</value>
</data>
<data name="net_http_invalid_header_name" xml:space="preserve">
<value>Received an invalid header name: '{0}'.</value>
</data>
<data name="net_http_http3_connection_error" xml:space="preserve">
<value>The HTTP/3 server sent invalid data on the connection. HTTP/3 error code '{0}' (0x{1}).</value>
</data>
<data name="net_http_retry_on_older_version" xml:space="preserve">
<value>The server is unable to process the request using the current HTTP version and indicates the request should be retried on an older HTTP version.</value>
</data>
<data name="net_http_content_write_larger_than_content_length" xml:space="preserve">
<value>Unable to write content to request stream; content would exceed Content-Length.</value>
</data>
<data name="net_http_qpack_no_dynamic_table" xml:space="preserve">
<value>The HTTP/3 server attempted to reference a dynamic table index that does not exist.</value>
</data>
<data name="net_http_request_timedout" xml:space="preserve">
<value>The request was canceled due to the configured HttpClient.Timeout of {0} seconds elapsing.</value>
</data>
<data name="net_http_connect_timedout" xml:space="preserve">
<value>A connection could not be established within the configured ConnectTimeout.</value>
</data>
<data name="net_quic_connectionaborted" xml:space="preserve">
<value>Connection aborted by peer ({0}).</value>
</data>
<data name="net_quic_operationaborted" xml:space="preserve">
<value>Operation aborted.</value>
</data>
<data name="net_quic_streamaborted" xml:space="preserve">
<value>Stream aborted by peer ({0}).</value>
</data>
<data name="net_http_missing_sync_implementation" xml:space="preserve">
<value>The synchronous method is not supported by '{0}'. If you're using a custom '{1}' and wish to use synchronous HTTP methods, you must override its '{2}' virtual method.</value>
</data>
<data name="net_http_http2_sync_not_supported" xml:space="preserve">
<value>The synchronous method is not supported by '{0}' for HTTP/2 or higher. Either use an asynchronous method or downgrade the request version to HTTP/1.1 or lower.</value>
</data>
<data name="net_http_upgrade_not_enabled_sync" xml:space="preserve">
<value>HTTP request version upgrade is not enabled for synchronous '{0}'. Do not use '{1}' version policy for synchronous HTTP methods.</value>
</data>
<data name="net_http_requested_version_cannot_establish" xml:space="preserve">
<value>Requesting HTTP version {0} with version policy {1} while unable to establish HTTP/{2} connection.</value>
</data>
<data name="net_http_requested_version_server_refused" xml:space="preserve">
<value>Requesting HTTP version {0} with version policy {1} while server offers only version fallback.</value>
</data>
<data name="net_http_exception_during_plaintext_filter" xml:space="preserve">
<value>An exception occurred while invoking the PlaintextStreamFilter.</value>
</data>
<data name="net_http_null_from_connect_callback" xml:space="preserve">
<value>The user-supplied ConnectCallback returned null.</value>
</data>
<data name="net_http_null_from_plaintext_filter" xml:space="preserve">
<value>The user-supplied PlaintextStreamFilter returned null.</value>
</data>
<data name="net_http_marshalling_response_promise_from_fetch" xml:space="preserve">
<value>Internal error marshalling the response Promise from `fetch`.</value>
</data>
<data name="net_http_synchronous_reads_not_supported" xml:space="preserve">
<value>Synchronous reads are not supported, use ReadAsync instead.</value>
</data>
<data name="net_socks_auth_failed" xml:space="preserve">
<value>Failed to authenticate with the SOCKS server.</value>
</data>
<data name="net_socks_bad_address_type" xml:space="preserve">
<value>SOCKS server returned an unknown address type.</value>
</data>
<data name="net_socks_connection_failed" xml:space="preserve">
<value>SOCKS server failed to connect to the destination.</value>
</data>
<data name="net_socks_ipv6_notsupported" xml:space="preserve">
<value>SOCKS4 does not support IPv6 addresses.</value>
</data>
<data name="net_socks_no_auth_method" xml:space="preserve">
<value>SOCKS server did not return a suitable authentication method.</value>
</data>
<data name="net_socks_no_ipv4_address" xml:space="preserve">
<value>Failed to resolve the destination host to an IPv4 address.</value>
</data>
<data name="net_socks_unexpected_version" xml:space="preserve">
<value>Unexpected SOCKS protocol version. Required {0}, got {1}.</value>
</data>
<data name="net_socks_string_too_long" xml:space="preserve">
<value>Encoding the {0} took more than the maximum of 255 bytes.</value>
</data>
<data name="net_socks_auth_required" xml:space="preserve">
<value>SOCKS server requested username & password authentication.</value>
</data>
<data name="net_http_proxy_tunnel_returned_failure_status_code" xml:space="preserve">
<value>The proxy tunnel request to proxy '{0}' failed with status code '{1}'.</value>
</data>
<data name="PlatformNotSupported_NetHttp" xml:space="preserve">
<value>System.Net.Http is not supported on this platform.</value>
</data>
</root>
| <?xml version="1.0" encoding="utf-8"?>
<root>
<!--
Microsoft ResX Schema
Version 2.0
The primary goals of this format is to allow a simple XML format
that is mostly human readable. The generation and parsing of the
various data types are done through the TypeConverter classes
associated with the data types.
Example:
... ado.net/XML headers & schema ...
<resheader name="resmimetype">text/microsoft-resx</resheader>
<resheader name="version">2.0</resheader>
<resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
<resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
<data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
<data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
<data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
<value>[base64 mime encoded serialized .NET Framework object]</value>
</data>
<data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
<value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
<comment>This is a comment</comment>
</data>
There are any number of "resheader" rows that contain simple
name/value pairs.
Each data row contains a name, and value. The row also contains a
type or mimetype. Type corresponds to a .NET class that support
text/value conversion through the TypeConverter architecture.
Classes that don't support this are serialized and stored with the
mimetype set.
The mimetype is used for serialized objects, and tells the
ResXResourceReader how to depersist the object. This is currently not
extensible. For a given mimetype the value must be set accordingly:
Note - application/x-microsoft.net.object.binary.base64 is the format
that the ResXResourceWriter will generate, however the reader can
read any of the formats listed below.
mimetype: application/x-microsoft.net.object.binary.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.soap.base64
value : The object must be serialized with
: System.Runtime.Serialization.Formatters.Soap.SoapFormatter
: and then encoded with base64 encoding.
mimetype: application/x-microsoft.net.object.bytearray.base64
value : The object must be serialized into a byte array
: using a System.ComponentModel.TypeConverter
: and then encoded with base64 encoding.
-->
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="net_securityprotocolnotsupported" xml:space="preserve">
<value>The requested security protocol is not supported.</value>
</data>
<data name="net_http_httpmethod_format_error" xml:space="preserve">
<value>The format of the HTTP method is invalid.</value>
</data>
<data name="net_http_reasonphrase_format_error" xml:space="preserve">
<value>The reason phrase must not contain new-line characters.</value>
</data>
<data name="net_http_copyto_array_too_small" xml:space="preserve">
<value>The number of elements is greater than the available space from arrayIndex to the end of the destination array.</value>
</data>
<data name="net_http_headers_not_found" xml:space="preserve">
<value>The given header was not found.</value>
</data>
<data name="net_http_headers_single_value_header" xml:space="preserve">
<value>Cannot add value because header '{0}' does not support multiple values.</value>
</data>
<data name="net_http_headers_invalid_header_name" xml:space="preserve">
<value>The header name '{0}' has an invalid format.</value>
</data>
<data name="net_http_headers_invalid_value" xml:space="preserve">
<value>The format of value '{0}' is invalid.</value>
</data>
<data name="net_http_headers_not_allowed_header_name" xml:space="preserve">
<value>Misused header name, '{0}'. Make sure request headers are used with HttpRequestMessage, response headers with HttpResponseMessage, and content headers with HttpContent objects.</value>
</data>
<data name="net_http_headers_invalid_host_header" xml:space="preserve">
<value>The specified value is not a valid 'Host' header string.</value>
</data>
<data name="net_http_headers_invalid_etag_name" xml:space="preserve">
<value>The specified value is not a valid quoted string.</value>
</data>
<data name="net_http_headers_invalid_range" xml:space="preserve">
<value>Invalid range. At least one of the two parameters must not be null.</value>
</data>
<data name="net_http_headers_no_newlines" xml:space="preserve">
<value>New-line characters are not allowed in header values.</value>
</data>
<data name="net_http_content_buffersize_exceeded" xml:space="preserve">
<value>Cannot write more bytes to the buffer than the configured maximum buffer size: {0}.</value>
</data>
<data name="net_http_content_no_task_returned" xml:space="preserve">
<value>The async operation did not return a System.Threading.Tasks.Task object.</value>
</data>
<data name="net_http_content_stream_already_read" xml:space="preserve">
<value>The stream was already consumed. It cannot be read again.</value>
</data>
<data name="net_http_content_readonly_stream" xml:space="preserve">
<value>The stream does not support writing.</value>
</data>
<data name="net_http_content_invalid_charset" xml:space="preserve">
<value>The character set provided in ContentType is invalid. Cannot read content as string using an invalid character set.</value>
</data>
<data name="net_http_content_stream_copy_error" xml:space="preserve">
<value>Error while copying content to a stream.</value>
</data>
<data name="net_http_content_read_as_stream_has_task" xml:space="preserve">
<value>The content's stream has already been retrieved via async ReadAsStreamAsync and cannot be subsequently accessed synchronously.</value>
</data>
<data name="net_http_argument_empty_string" xml:space="preserve">
<value>The value cannot be null or empty.</value>
</data>
<data name="net_http_client_request_already_sent" xml:space="preserve">
<value>The request message was already sent. Cannot send the same request message multiple times.</value>
</data>
<data name="net_http_operation_started" xml:space="preserve">
<value>This instance has already started one or more requests. Properties can only be modified before sending the first request.</value>
</data>
<data name="net_http_client_execution_error" xml:space="preserve">
<value>An error occurred while sending the request.</value>
</data>
<data name="net_http_client_absolute_baseaddress_required" xml:space="preserve">
<value>The base address must be an absolute URI.</value>
</data>
<data name="net_http_client_invalid_requesturi" xml:space="preserve">
<value>An invalid request URI was provided. Either the request URI must be an absolute URI or BaseAddress must be set.</value>
</data>
<data name="net_http_unsupported_requesturi_scheme" xml:space="preserve">
<value>The '{0}' scheme is not supported.</value>
</data>
<data name="net_http_parser_invalid_base64_string" xml:space="preserve">
<value>Value '{0}' is not a valid Base64 string. Error: {1}</value>
</data>
<data name="net_http_handler_noresponse" xml:space="preserve">
<value>Handler did not return a response message.</value>
</data>
<data name="net_http_handler_norequest" xml:space="preserve">
<value>A request message must be provided. It cannot be null.</value>
</data>
<data name="net_http_message_not_success_statuscode" xml:space="preserve">
<value>Response status code does not indicate success: {0} ({1}).</value>
</data>
<data name="net_http_content_field_too_long" xml:space="preserve">
<value>The field cannot be longer than {0} characters.</value>
</data>
<data name="net_http_log_headers_no_newlines" xml:space="preserve">
<value>Value for header '{0}' contains new-line characters. Value: '{1}'.</value>
</data>
<data name="net_http_log_headers_invalid_quality" xml:space="preserve">
<value>The 'q' value is invalid: '{0}'.</value>
</data>
<data name="net_http_handler_not_assigned" xml:space="preserve">
<value>The inner handler has not been assigned.</value>
</data>
<data name="net_http_invalid_enable_first" xml:space="preserve">
<value>The {0} property must be set to '{1}' to use this property.</value>
</data>
<data name="net_http_content_buffersize_limit" xml:space="preserve">
<value>Buffering more than {0} bytes is not supported.</value>
</data>
<data name="net_http_io_read" xml:space="preserve">
<value>The read operation failed, see inner exception.</value>
</data>
<data name="net_http_io_read_incomplete" xml:space="preserve">
<value>Unable to read data from the transport connection. The connection was closed before all data could be read. Expected {0} bytes, read {1} bytes.</value>
</data>
<data name="net_http_io_write" xml:space="preserve">
<value>The write operation failed, see inner exception.</value>
</data>
<data name="net_http_chunked_not_allowed_with_empty_content" xml:space="preserve">
<value>'Transfer-Encoding: chunked' header can not be used when content object is not specified.</value>
</data>
<data name="net_http_invalid_cookiecontainer" xml:space="preserve">
<value>When using CookieUsePolicy.UseSpecifiedCookieContainer, the CookieContainer property must not be null.</value>
</data>
<data name="net_http_invalid_proxyusepolicy" xml:space="preserve">
<value>When using a non-null Proxy, the WindowsProxyUsePolicy property must be set to WindowsProxyUsePolicy.UseCustomProxy.</value>
</data>
<data name="net_http_invalid_proxy" xml:space="preserve">
<value>When using WindowsProxyUsePolicy.UseCustomProxy, the Proxy property must not be null.</value>
</data>
<data name="net_http_value_must_be_greater_than" xml:space="preserve">
<value>The specified value must be greater than {0}.</value>
</data>
<data name="net_http_value_must_be_greater_than_or_equal" xml:space="preserve">
<value>The specified value '{0}' must be greater than or equal to '{1}'.</value>
</data>
<data name="net_cookie_attribute" xml:space="preserve">
<value>The '{0}'='{1}' part of the cookie is invalid.</value>
</data>
<data name="ArgumentOutOfRange_FileLengthTooBig" xml:space="preserve">
<value>Specified file length was too large for the file system.</value>
</data>
<data name="IO_FileExists_Name" xml:space="preserve">
<value>The file '{0}' already exists.</value>
</data>
<data name="IO_FileNotFound" xml:space="preserve">
<value>Unable to find the specified file.</value>
</data>
<data name="IO_FileNotFound_FileName" xml:space="preserve">
<value>Could not find file '{0}'.</value>
</data>
<data name="IO_PathNotFound_NoPathName" xml:space="preserve">
<value>Could not find a part of the path.</value>
</data>
<data name="IO_PathNotFound_Path" xml:space="preserve">
<value>Could not find a part of the path '{0}'.</value>
</data>
<data name="IO_PathTooLong" xml:space="preserve">
<value>The specified file name or path is too long, or a component of the specified path is too long.</value>
</data>
<data name="IO_SharingViolation_File" xml:space="preserve">
<value>The process cannot access the file '{0}' because it is being used by another process.</value>
</data>
<data name="IO_SharingViolation_NoFileName" xml:space="preserve">
<value>The process cannot access the file because it is being used by another process.</value>
</data>
<data name="UnauthorizedAccess_IODenied_NoPathName" xml:space="preserve">
<value>Access to the path is denied.</value>
</data>
<data name="UnauthorizedAccess_IODenied_Path" xml:space="preserve">
<value>Access to the path '{0}' is denied.</value>
</data>
<data name="net_http_username_empty_string" xml:space="preserve">
<value>The username for a credential object cannot be null or empty.</value>
</data>
<data name="net_http_no_concurrent_io_allowed" xml:space="preserve">
<value>The stream does not support concurrent I/O read or write operations.</value>
</data>
<data name="net_http_invalid_response" xml:space="preserve">
<value>The server returned an invalid or unrecognized response.</value>
</data>
<data name="net_http_request_content_length_mismatch" xml:space="preserve">
<value>Sent {0} request content bytes, but Content-Length promised {1}.</value>
</data>
<data name="net_http_invalid_response_premature_eof" xml:space="preserve">
<value>The response ended prematurely.</value>
</data>
<data name="net_http_invalid_response_missing_frame" xml:space="preserve">
<value>The response ended prematurely while waiting for the next frame from the server.</value>
</data>
<data name="net_http_invalid_response_premature_eof_bytecount" xml:space="preserve">
<value>The response ended prematurely, with at least {0} additional bytes expected.</value>
</data>
<data name="net_http_invalid_response_chunk_header_invalid" xml:space="preserve">
<value>Received chunk header length could not be parsed: '{0}'.</value>
</data>
<data name="net_http_invalid_response_chunk_extension_invalid" xml:space="preserve">
<value>Received an invalid chunk extension: '{0}'.</value>
</data>
<data name="net_http_invalid_response_chunk_terminator_invalid" xml:space="preserve">
<value>Received an invalid chunk terminator: '{0}'.</value>
</data>
<data name="net_http_invalid_response_status_line" xml:space="preserve">
<value>Received an invalid status line: '{0}'.</value>
</data>
<data name="net_http_invalid_response_status_code" xml:space="preserve">
<value>Received an invalid status code: '{0}'.</value>
</data>
<data name="net_http_invalid_response_status_reason" xml:space="preserve">
<value>Received status phrase could not be decoded with iso-8859-1: '{0}'.</value>
</data>
<data name="net_http_invalid_response_multiple_status_codes" xml:space="preserve">
<value>The response contained more than one status code.</value>
</data>
<data name="net_http_invalid_response_header_folder" xml:space="preserve">
<value>Received an invalid folded header.</value>
</data>
<data name="net_http_invalid_response_header_line" xml:space="preserve">
<value>Received an invalid header line: '{0}'.</value>
</data>
<data name="net_http_invalid_response_header_name" xml:space="preserve">
<value>Received an invalid header name: '{0}'.</value>
</data>
<data name="net_http_request_aborted" xml:space="preserve">
<value>The request was aborted.</value>
</data>
<data name="net_http_invalid_response_pseudo_header_in_trailer" xml:space="preserve">
<value>Received an HTTP/2 pseudo-header as a trailing header.</value>
</data>
<data name="net_http_buffer_insufficient_length" xml:space="preserve">
<value>The buffer was not long enough.</value>
</data>
<data name="net_http_response_headers_exceeded_length" xml:space="preserve">
<value>The HTTP response headers length exceeded the set limit of {0} bytes.</value>
</data>
<data name="ArgumentOutOfRange_NeedNonNegativeNum" xml:space="preserve">
<value>Non-negative number required.</value>
</data>
<data name="ArgumentOutOfRange_NeedPosNum" xml:space="preserve">
<value>Positive number required.</value>
</data>
<data name="NotSupported_UnreadableStream" xml:space="preserve">
<value>Stream does not support reading.</value>
</data>
<data name="NotSupported_UnwritableStream" xml:space="preserve">
<value>Stream does not support writing.</value>
</data>
<data name="ObjectDisposed_StreamClosed" xml:space="preserve">
<value>Cannot access a closed stream.</value>
</data>
<data name="net_http_invalid_proxy_scheme" xml:space="preserve">
<value>Only the 'http', 'socks4', 'socks4a' and 'socks5' schemes are allowed for proxies.</value>
</data>
<data name="net_http_request_invalid_char_encoding" xml:space="preserve">
<value>Request headers must contain only ASCII characters.</value>
</data>
<data name="net_http_ssl_connection_failed" xml:space="preserve">
<value>The SSL connection could not be established, see inner exception.</value>
</data>
<data name="net_http_unsupported_chunking" xml:space="preserve">
<value>HTTP 1.0 does not support chunking.</value>
</data>
<data name="net_http_unsupported_version" xml:space="preserve">
<value>Request HttpVersion 0.X is not supported. Use 1.0 or above.</value>
</data>
<data name="IO_SeekBeforeBegin" xml:space="preserve">
<value>An attempt was made to move the position before the beginning of the stream.</value>
</data>
<data name="net_ssl_app_protocols_invalid" xml:space="preserve">
<value>The application protocol list is invalid.</value>
</data>
<data name="net_ssl_http2_requires_tls12" xml:space="preserve">
<value>HTTP/2 requires TLS 1.2 or newer, but '{0}' was negotiated.</value>
</data>
<data name="IO_PathTooLong_Path" xml:space="preserve">
<value>The path '{0}' is too long, or a component of the specified path is too long.</value>
</data>
<data name="net_http_request_no_host" xml:space="preserve">
<value>CONNECT request must contain Host header.</value>
</data>
<data name="net_http_winhttp_error" xml:space="preserve">
<value>Error {0} calling {1}, '{2}'.</value>
</data>
<data name="net_http_http2_connection_error" xml:space="preserve">
<value>The HTTP/2 server sent invalid data on the connection. HTTP/2 error code '{0}' (0x{1}).</value>
</data>
<data name="net_http_http2_stream_error" xml:space="preserve">
<value>The HTTP/2 server reset the stream. HTTP/2 error code '{0}' (0x{1}).</value>
</data>
<data name="net_http_http2_connection_not_established" xml:space="preserve">
<value>An HTTP/2 connection could not be established because the server did not complete the HTTP/2 handshake.</value>
</data>
<data name="net_http_http2_invalidinitialstreamwindowsize" xml:space="preserve">
<value>The initial HTTP/2 stream window size must be between {0} and {1}.</value>
</data>
<data name="net_MethodNotImplementedException" xml:space="preserve">
<value>This method is not implemented by this class.</value>
</data>
<data name="event_OperationReturnedSomething" xml:space="preserve">
<value>{0} returned {1}.</value>
</data>
<data name="net_log_operation_failed_with_error" xml:space="preserve">
<value>{0} failed with error {1}.</value>
</data>
<data name="net_completed_result" xml:space="preserve">
<value>This operation cannot be performed on a completed asynchronous result object.</value>
</data>
<data name="net_invalid_enum" xml:space="preserve">
<value>The specified value is not valid in the '{0}' enumeration.</value>
</data>
<data name="net_auth_message_not_encrypted" xml:space="preserve">
<value>Protocol error: A received message contains a valid signature but it was not encrypted as required by the effective Protection Level.</value>
</data>
<data name="net_securitypackagesupport" xml:space="preserve">
<value>The requested security package is not supported.</value>
</data>
<data name="SSPIInvalidHandleType" xml:space="preserve">
<value>'{0}' is not a supported handle type.</value>
</data>
<data name="net_http_authconnectionfailure" xml:space="preserve">
<value>Authentication failed because the connection could not be reused.</value>
</data>
<data name="net_nego_server_not_supported" xml:space="preserve">
<value>Server implementation is not supported</value>
</data>
<data name="net_nego_protection_level_not_supported" xml:space="preserve">
<value>Requested protection level is not supported with the GSSAPI implementation currently installed.</value>
</data>
<data name="net_context_buffer_too_small" xml:space="preserve">
<value>Insufficient buffer space. Required: {0} Actual: {1}.</value>
</data>
<data name="net_gssapi_operation_failed_detailed" xml:space="preserve">
<value>GSSAPI operation failed with error - {0} ({1}).</value>
</data>
<data name="net_gssapi_operation_failed" xml:space="preserve">
<value>GSSAPI operation failed with status: {0} (Minor status: {1}).</value>
</data>
<data name="net_gssapi_operation_failed_detailed_majoronly" xml:space="preserve">
<value>GSSAPI operation failed with error - {0}.</value>
</data>
<data name="net_gssapi_operation_failed_majoronly" xml:space="preserve">
<value>GSSAPI operation failed with status: {0}.</value>
</data>
<data name="net_gssapi_ntlm_missing_plugin" xml:space="preserve">
<value>NTLM authentication requires the GSSAPI plugin 'gss-ntlmssp'.</value>
</data>
<data name="net_ntlm_not_possible_default_cred" xml:space="preserve">
<value>NTLM authentication is not possible with default credentials on this platform.</value>
</data>
<data name="net_nego_not_supported_empty_target_with_defaultcreds" xml:space="preserve">
<value>Target name should be non empty if default credentials are passed.</value>
</data>
<data name="net_http_hpack_huffman_decode_failed" xml:space="preserve">
<value>Huffman-coded literal string failed to decode.</value>
</data>
<data name="net_http_hpack_incomplete_header_block" xml:space="preserve">
<value>Incomplete header block received.</value>
</data>
<data name="net_http_hpack_late_dynamic_table_size_update" xml:space="preserve">
<value>Dynamic table size update received after beginning of header block.</value>
</data>
<data name="net_http_hpack_bad_integer" xml:space="preserve">
<value>HPACK integer exceeds limits or has an overlong encoding.</value>
</data>
<data name="net_http_disposed_while_in_use" xml:space="preserve">
<value>The object was disposed while operations were in progress.</value>
</data>
<data name="net_http_hpack_large_table_size_update" xml:space="preserve">
<value>Dynamic table size update to {0} bytes exceeds limit of {1} bytes.</value>
</data>
<data name="net_http_server_shutdown" xml:space="preserve">
<value>The server shut down the connection.</value>
</data>
<data name="net_http_hpack_invalid_index" xml:space="preserve">
<value>Invalid header index: {0} is outside of static table and no dynamic table entry found.</value>
</data>
<data name="net_http_hpack_unexpected_end" xml:space="preserve">
<value>End of headers reached with incomplete token.</value>
</data>
<data name="net_http_headers_exceeded_length" xml:space="preserve">
<value>The HTTP headers length exceeded the set limit of {0} bytes.</value>
</data>
<data name="net_http_invalid_header_name" xml:space="preserve">
<value>Received an invalid header name: '{0}'.</value>
</data>
<data name="net_http_http3_connection_error" xml:space="preserve">
<value>The HTTP/3 server sent invalid data on the connection. HTTP/3 error code '{0}' (0x{1}).</value>
</data>
<data name="net_http_retry_on_older_version" xml:space="preserve">
<value>The server is unable to process the request using the current HTTP version and indicates the request should be retried on an older HTTP version.</value>
</data>
<data name="net_http_content_write_larger_than_content_length" xml:space="preserve">
<value>Unable to write content to request stream; content would exceed Content-Length.</value>
</data>
<data name="net_http_qpack_no_dynamic_table" xml:space="preserve">
<value>The HTTP/3 server attempted to reference a dynamic table index that does not exist.</value>
</data>
<data name="net_http_request_timedout" xml:space="preserve">
<value>The request was canceled due to the configured HttpClient.Timeout of {0} seconds elapsing.</value>
</data>
<data name="net_http_connect_timedout" xml:space="preserve">
<value>A connection could not be established within the configured ConnectTimeout.</value>
</data>
<data name="net_quic_connectionaborted" xml:space="preserve">
<value>Connection aborted by peer ({0}).</value>
</data>
<data name="net_quic_operationaborted" xml:space="preserve">
<value>Operation aborted.</value>
</data>
<data name="net_quic_streamaborted" xml:space="preserve">
<value>Stream aborted by peer ({0}).</value>
</data>
<data name="net_http_missing_sync_implementation" xml:space="preserve">
<value>The synchronous method is not supported by '{0}'. If you're using a custom '{1}' and wish to use synchronous HTTP methods, you must override its '{2}' virtual method.</value>
</data>
<data name="net_http_http2_sync_not_supported" xml:space="preserve">
<value>The synchronous method is not supported by '{0}' for HTTP/2 or higher. Either use an asynchronous method or downgrade the request version to HTTP/1.1 or lower.</value>
</data>
<data name="net_http_upgrade_not_enabled_sync" xml:space="preserve">
<value>HTTP request version upgrade is not enabled for synchronous '{0}'. Do not use '{1}' version policy for synchronous HTTP methods.</value>
</data>
<data name="net_http_requested_version_cannot_establish" xml:space="preserve">
<value>Requesting HTTP version {0} with version policy {1} while unable to establish HTTP/{2} connection.</value>
</data>
<data name="net_http_requested_version_server_refused" xml:space="preserve">
<value>Requesting HTTP version {0} with version policy {1} while server offers only version fallback.</value>
</data>
<data name="net_http_exception_during_plaintext_filter" xml:space="preserve">
<value>An exception occurred while invoking the PlaintextStreamFilter.</value>
</data>
<data name="net_http_null_from_connect_callback" xml:space="preserve">
<value>The user-supplied ConnectCallback returned null.</value>
</data>
<data name="net_http_null_from_plaintext_filter" xml:space="preserve">
<value>The user-supplied PlaintextStreamFilter returned null.</value>
</data>
<data name="net_http_marshalling_response_promise_from_fetch" xml:space="preserve">
<value>Internal error marshalling the response Promise from `fetch`.</value>
</data>
<data name="net_http_synchronous_reads_not_supported" xml:space="preserve">
<value>Synchronous reads are not supported, use ReadAsync instead.</value>
</data>
<data name="net_socks_auth_failed" xml:space="preserve">
<value>Failed to authenticate with the SOCKS server.</value>
</data>
<data name="net_socks_bad_address_type" xml:space="preserve">
<value>SOCKS server returned an unknown address type.</value>
</data>
<data name="net_socks_connection_failed" xml:space="preserve">
<value>SOCKS server failed to connect to the destination.</value>
</data>
<data name="net_socks_ipv6_notsupported" xml:space="preserve">
<value>SOCKS4 does not support IPv6 addresses.</value>
</data>
<data name="net_socks_no_auth_method" xml:space="preserve">
<value>SOCKS server did not return a suitable authentication method.</value>
</data>
<data name="net_socks_no_ipv4_address" xml:space="preserve">
<value>Failed to resolve the destination host to an IPv4 address.</value>
</data>
<data name="net_socks_unexpected_version" xml:space="preserve">
<value>Unexpected SOCKS protocol version. Required {0}, got {1}.</value>
</data>
<data name="net_socks_string_too_long" xml:space="preserve">
<value>Encoding the {0} took more than the maximum of 255 bytes.</value>
</data>
<data name="net_socks_auth_required" xml:space="preserve">
<value>SOCKS server requested username & password authentication.</value>
</data>
<data name="net_http_proxy_tunnel_returned_failure_status_code" xml:space="preserve">
<value>The proxy tunnel request to proxy '{0}' failed with status code '{1}'.</value>
</data>
<data name="PlatformNotSupported_NetHttp" xml:space="preserve">
<value>System.Net.Http is not supported on this platform.</value>
</data>
</root>
| 1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Net.Http/src/System/Net/Http/Headers/HttpHeaders.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
namespace System.Net.Http.Headers
{
/// <summary>
/// Key/value pairs of headers. The value is either a raw <see cref="string"/> or a <see cref="HttpHeaders.HeaderStoreItemInfo"/>.
/// We're using a custom type instead of <see cref="KeyValuePair{TKey, TValue}"/> because we need ref access to fields.
/// </summary>
internal struct HeaderEntry
{
public HeaderDescriptor Key;
public object Value;
public HeaderEntry(HeaderDescriptor key, object value)
{
Key = key;
Value = value;
}
}
public abstract class HttpHeaders : IEnumerable<KeyValuePair<string, IEnumerable<string>>>
{
// This type is used to store a collection of headers in 'headerStore':
// - A header can have multiple values.
// - A header can have an associated parser which is able to parse the raw string value into a strongly typed object.
// - If a header has an associated parser and the provided raw value can't be parsed, the value is considered
// invalid. Invalid values are stored if added using TryAddWithoutValidation(). If the value was added using Add(),
// Add() will throw FormatException.
// - Since parsing header values is expensive and users usually only care about a few headers, header values are
// lazily initialized.
//
// Given the properties above, a header value can have three states:
// - 'raw': The header value was added using TryAddWithoutValidation() and it wasn't parsed yet.
// - 'parsed': The header value was successfully parsed. It was either added using Add() where the value was parsed
// immediately, or if added using TryAddWithoutValidation() a user already accessed a property/method triggering the
// value to be parsed.
// - 'invalid': The header value was parsed, but parsing failed because the value is invalid. Storing invalid values
// allows users to still retrieve the value (by calling GetValues()), but it will not be exposed as strongly typed
// object. E.g. the client receives a response with the following header: 'Via: 1.1 proxy, invalid'
// - HttpHeaders.GetValues() will return "1.1 proxy", "invalid"
// - HttpResponseHeaders.Via collection will only contain one ViaHeaderValue object with value "1.1 proxy"
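//
// Editorial sketch (not part of the original source): how a single value moves through these states,
// assuming 'headers' is this HttpHeaders instance and 'response' is the owning HttpResponseMessage:
// headers.TryAddWithoutValidation("Via", "1.1 proxy, invalid"); // stored as 'raw'; nothing is parsed yet
// headers.GetValues("Via"); // parsed lazily: "1.1 proxy" becomes 'parsed', "invalid" stays as an 'invalid' value and is still returned
// response.Headers.Via; // the strongly typed view exposes only the parsed ViaHeaderValue ("1.1 proxy")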
/// <summary>Either a <see cref="HeaderEntry"/> array or a Dictionary<<see cref="HeaderDescriptor"/>, <see cref="object"/>> </summary>
private object? _headerStore;
private int _count;
private readonly HttpHeaderType _allowedHeaderTypes;
private readonly HttpHeaderType _treatAsCustomHeaderTypes;
protected HttpHeaders()
: this(HttpHeaderType.All, HttpHeaderType.None)
{
}
internal HttpHeaders(HttpHeaderType allowedHeaderTypes, HttpHeaderType treatAsCustomHeaderTypes)
{
// Should be no overlap
Debug.Assert((allowedHeaderTypes & treatAsCustomHeaderTypes) == 0);
_allowedHeaderTypes = allowedHeaderTypes & ~HttpHeaderType.NonTrailing;
_treatAsCustomHeaderTypes = treatAsCustomHeaderTypes & ~HttpHeaderType.NonTrailing;
}
/// <summary>Gets a view of the contents of this headers collection that does not parse nor validate the data upon access.</summary>
public HttpHeadersNonValidated NonValidated => new HttpHeadersNonValidated(this);
public void Add(string name, string? value) => Add(GetHeaderDescriptor(name), value);
internal void Add(HeaderDescriptor descriptor, string? value)
{
// We don't use GetOrCreateHeaderInfo() here, since this would create a new header in the store. If parsing
// the value then throws, we would have to remove the header from the store again. So just get a
// HeaderStoreItemInfo object and try to parse the value. If it works, we'll add the header.
PrepareHeaderInfoForAdd(descriptor, out HeaderStoreItemInfo info, out bool addToStore);
ParseAndAddValue(descriptor, info, value);
// If we get here, then the value could be parsed correctly. If we created a new HeaderStoreItemInfo, add
// it to the store if we added at least one value.
if (addToStore && (info.ParsedValue != null))
{
Debug.Assert(!ContainsKey(descriptor));
AddEntryToStore(new HeaderEntry(descriptor, info));
}
}
public void Add(string name, IEnumerable<string?> values) => Add(GetHeaderDescriptor(name), values);
internal void Add(HeaderDescriptor descriptor, IEnumerable<string?> values!!)
{
PrepareHeaderInfoForAdd(descriptor, out HeaderStoreItemInfo info, out bool addToStore);
try
{
// Note that if the first couple of values are valid followed by an invalid value, the valid values
// will be added to the store before the exception for the invalid value is thrown.
foreach (string? value in values)
{
ParseAndAddValue(descriptor, info, value);
}
}
finally
{
// Even if one of the values was invalid, make sure we add the header for the valid ones. We need to be
// consistent here: If values get added to an _existing_ header, then all values until the invalid one
// get added. Same here: If multiple values get added to a _new_ header, make sure the header gets added
// with the valid values.
// However, if all values for a _new_ header were invalid, then don't add the header.
if (addToStore && (info.ParsedValue != null))
{
Debug.Assert(!ContainsKey(descriptor));
AddEntryToStore(new HeaderEntry(descriptor, info));
}
}
}
public bool TryAddWithoutValidation(string name, string? value) =>
TryGetHeaderDescriptor(name, out HeaderDescriptor descriptor) &&
TryAddWithoutValidation(descriptor, value);
internal bool TryAddWithoutValidation(HeaderDescriptor descriptor, string? value)
{
// Normalize null values to be empty values, which are allowed. If the user adds multiple
// null/empty values, all of them are added to the collection. This will result in delimiter-only
// values, e.g. adding two null-strings (or empty, or whitespace-only) results in "My-Header: ,".
value ??= string.Empty;
ref object? storeValueRef = ref GetValueRefOrAddDefault(descriptor);
object? currentValue = storeValueRef;
if (currentValue is null)
{
storeValueRef = value;
}
else
{
if (currentValue is not HeaderStoreItemInfo info)
{
// The header store contained a single raw string value, so promote it
// to being a HeaderStoreItemInfo and add to it.
Debug.Assert(currentValue is string);
storeValueRef = info = new HeaderStoreItemInfo() { RawValue = currentValue };
}
AddRawValue(info, value);
}
return true;
}
public bool TryAddWithoutValidation(string name, IEnumerable<string?> values) =>
TryGetHeaderDescriptor(name, out HeaderDescriptor descriptor) &&
TryAddWithoutValidation(descriptor, values);
internal bool TryAddWithoutValidation(HeaderDescriptor descriptor, IEnumerable<string?> values!!)
{
using IEnumerator<string?> enumerator = values.GetEnumerator();
if (enumerator.MoveNext())
{
TryAddWithoutValidation(descriptor, enumerator.Current);
if (enumerator.MoveNext())
{
ref object? storeValueRef = ref GetValueRefOrAddDefault(descriptor);
Debug.Assert(storeValueRef is not null);
object value = storeValueRef;
if (value is not HeaderStoreItemInfo info)
{
Debug.Assert(value is string);
storeValueRef = info = new HeaderStoreItemInfo { RawValue = value };
}
do
{
AddRawValue(info, enumerator.Current ?? string.Empty);
}
while (enumerator.MoveNext());
}
}
return true;
}
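// Editorial sketch (not in the original file): the observable difference between Add() and
// TryAddWithoutValidation() for a header that has an associated parser, assuming 'headers' is an
// HttpRequestHeaders instance:
// headers.Add("Date", "not a date"); // parses eagerly and throws FormatException
// headers.TryAddWithoutValidation("Date", "not a date"); // returns true; the raw string is stored and only
// // marked invalid if/when a later access parses it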
public IEnumerable<string> GetValues(string name) => GetValues(GetHeaderDescriptor(name));
internal IEnumerable<string> GetValues(HeaderDescriptor descriptor)
{
if (TryGetValues(descriptor, out IEnumerable<string>? values))
{
return values;
}
throw new InvalidOperationException(SR.net_http_headers_not_found);
}
public bool TryGetValues(string name, [NotNullWhen(true)] out IEnumerable<string>? values)
{
if (TryGetHeaderDescriptor(name, out HeaderDescriptor descriptor))
{
return TryGetValues(descriptor, out values);
}
values = null;
return false;
}
internal bool TryGetValues(HeaderDescriptor descriptor, [NotNullWhen(true)] out IEnumerable<string>? values)
{
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
values = GetStoreValuesAsStringArray(descriptor, info);
return true;
}
values = null;
return false;
}
public bool Contains(string name) => Contains(GetHeaderDescriptor(name));
internal bool Contains(HeaderDescriptor descriptor)
{
// We can't just call headerStore.ContainsKey() since after parsing the value the header may not exist
// anymore (if the value contains newline chars, we remove the header). So try to parse the
// header value.
return TryGetAndParseHeaderInfo(descriptor, out _);
}
public override string ToString()
{
// Return all headers as string similar to:
// HeaderName1: Value1, Value2
// HeaderName2: Value1
// ...
var vsb = new ValueStringBuilder(stackalloc char[512]);
foreach (HeaderEntry entry in GetEntries())
{
vsb.Append(entry.Key.Name);
vsb.Append(": ");
GetStoreValuesAsStringOrStringArray(entry.Key, entry.Value, out string? singleValue, out string[]? multiValue);
Debug.Assert(singleValue is not null ^ multiValue is not null);
if (singleValue is not null)
{
vsb.Append(singleValue);
}
else
{
// Note that if we get multiple values for a header that doesn't support multiple values, we'll
// just separate the values using a comma (default separator).
string? separator = entry.Key.Parser is HttpHeaderParser parser && parser.SupportsMultipleValues ? parser.Separator : HttpHeaderParser.DefaultSeparator;
Debug.Assert(multiValue is not null && multiValue.Length > 0);
vsb.Append(multiValue[0]);
for (int i = 1; i < multiValue.Length; i++)
{
vsb.Append(separator);
vsb.Append(multiValue[i]);
}
}
vsb.Append(Environment.NewLine);
}
return vsb.ToString();
}
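// Returns all values of the header joined into a single string using the header's separator
// (comma by default), or string.Empty if the header is not present.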
internal string GetHeaderString(HeaderDescriptor descriptor)
{
if (TryGetHeaderValue(descriptor, out object? info))
{
GetStoreValuesAsStringOrStringArray(descriptor, info, out string? singleValue, out string[]? multiValue);
Debug.Assert(singleValue is not null ^ multiValue is not null);
if (singleValue is not null)
{
return singleValue;
}
// Note that if we get multiple values for a header that doesn't support multiple values, we'll
// just separate the values using a comma (default separator).
string? separator = descriptor.Parser != null && descriptor.Parser.SupportsMultipleValues ? descriptor.Parser.Separator : HttpHeaderParser.DefaultSeparator;
return string.Join(separator, multiValue!);
}
return string.Empty;
}
#region IEnumerable<KeyValuePair<string, IEnumerable<string>>> Members
public IEnumerator<KeyValuePair<string, IEnumerable<string>>> GetEnumerator() => _count == 0 ?
((IEnumerable<KeyValuePair<string, IEnumerable<string>>>)Array.Empty<KeyValuePair<string, IEnumerable<string>>>()).GetEnumerator() :
GetEnumeratorCore();
private IEnumerator<KeyValuePair<string, IEnumerable<string>>> GetEnumeratorCore()
{
HeaderEntry[]? entries = GetEntriesArray();
Debug.Assert(_count != 0 && entries is not null, "Caller should have validated the collection is not empty");
int count = _count;
for (int i = 0; i < count; i++)
{
HeaderEntry entry = entries[i];
if (entry.Value is not HeaderStoreItemInfo info)
{
// To retain consistent semantics, we need to upgrade a raw string to a HeaderStoreItemInfo
// during enumeration so that we can parse the raw value in order to a) return
// the correct set of parsed values, and b) update the instance for subsequent enumerations
// to reflect that parsing.
info = new HeaderStoreItemInfo() { RawValue = entry.Value };
if (EntriesAreLiveView)
{
entries[i].Value = info;
}
else
{
Debug.Assert(ContainsKey(entry.Key));
((Dictionary<HeaderDescriptor, object>)_headerStore!)[entry.Key] = info;
}
}
// Make sure we parse all raw values before returning the result. Note that this has to be
// done before we calculate the array length (below): A raw value may contain a list of
// values.
if (!ParseRawHeaderValues(entry.Key, info))
{
// We saw an invalid header value (contains newline chars) and deleted it.
// If the HeaderEntry[] we are enumerating is the live header store, the entries have shifted.
if (EntriesAreLiveView)
{
i--;
count--;
}
}
else
{
string[] values = GetStoreValuesAsStringArray(entry.Key, info);
yield return new KeyValuePair<string, IEnumerable<string>>(entry.Key.Name, values);
}
}
}
#endregion
#region IEnumerable Members
Collections.IEnumerator Collections.IEnumerable.GetEnumerator() => GetEnumerator();
#endregion
internal void AddParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
Debug.Assert(descriptor.Parser != null, "Can't add parsed value if there is no parser available.");
HeaderStoreItemInfo info = GetOrCreateHeaderInfo(descriptor);
// If the current header supports only one value, we can't add another value. The strongly typed property
// must not call AddParsedValue(), but SetParsedValue(). E.g. for headers like 'Date', 'Host'.
Debug.Assert(descriptor.Parser.SupportsMultipleValues, $"Header '{descriptor.Name}' doesn't support multiple values");
AddParsedValue(info, value);
}
internal void SetParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
Debug.Assert(descriptor.Parser != null, "Can't add parsed value if there is no parser available.");
// This method will first clear all values. This is used e.g. when setting the 'Date' or 'Host' header.
// i.e. headers not supporting collections.
HeaderStoreItemInfo info = GetOrCreateHeaderInfo(descriptor);
info.InvalidValue = null;
info.ParsedValue = null;
info.RawValue = null;
AddParsedValue(info, value);
}
internal void SetOrRemoveParsedValue(HeaderDescriptor descriptor, object? value)
{
if (value == null)
{
Remove(descriptor);
}
else
{
SetParsedValue(descriptor, value);
}
}
public bool Remove(string name) => Remove(GetHeaderDescriptor(name));
internal bool RemoveParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
// If we have a value for this header, then verify if we have a single value. If so, compare that
// value with 'item'. If we have a list of values, then remove 'item' from the list.
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
Debug.Assert(descriptor.Parser != null, "Can't remove parsed value if there is no parser available.");
Debug.Assert(descriptor.Parser.SupportsMultipleValues,
"This method should not be used for single-value headers. Use Remove(string) instead.");
// If there is no entry, just return.
if (info.ParsedValue == null)
{
return false;
}
bool result = false;
IEqualityComparer? comparer = descriptor.Parser.Comparer;
List<object>? parsedValues = info.ParsedValue as List<object>;
if (parsedValues == null)
{
Debug.Assert(info.ParsedValue.GetType() == value.GetType(),
"Stored value does not have the same type as 'value'.");
if (AreEqual(value, info.ParsedValue, comparer))
{
info.ParsedValue = null;
result = true;
}
}
else
{
foreach (object item in parsedValues)
{
Debug.Assert(item.GetType() == value.GetType(),
"One of the stored values does not have the same type as 'value'.");
if (AreEqual(value, item, comparer))
{
// Remove 'item' rather than 'value', since the 'comparer' may consider two values
// equal even though the default obj.Equals() may not (e.g. if 'comparer' does
// case-insensitive comparison for strings, but string.Equals() is case-sensitive).
result = parsedValues.Remove(item);
break;
}
}
// If we removed the last item in a list, remove the list.
if (parsedValues.Count == 0)
{
info.ParsedValue = null;
}
}
// If there is no value for the header left, remove the header.
if (info.IsEmpty)
{
bool headerRemoved = Remove(descriptor);
Debug.Assert(headerRemoved, $"Existing header '{descriptor.Name}' couldn't be removed.");
}
return result;
}
return false;
}
internal bool ContainsParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
// If we have a value for this header, then verify if we have a single value. If so, compare that
// value with 'item'. If we have a list of values, then compare each item in the list with 'item'.
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
Debug.Assert(descriptor.Parser != null, "Can't check for parsed value if there is no parser available.");
Debug.Assert(descriptor.Parser.SupportsMultipleValues,
"This method should not be used for single-value headers. Use equality comparer instead.");
// If there is no entry, just return.
if (info.ParsedValue == null)
{
return false;
}
List<object>? parsedValues = info.ParsedValue as List<object>;
IEqualityComparer? comparer = descriptor.Parser.Comparer;
if (parsedValues == null)
{
Debug.Assert(info.ParsedValue.GetType() == value.GetType(),
"Stored value does not have the same type as 'value'.");
return AreEqual(value, info.ParsedValue, comparer);
}
else
{
foreach (object item in parsedValues)
{
Debug.Assert(item.GetType() == value.GetType(),
"One of the stored values does not have the same type as 'value'.");
if (AreEqual(value, item, comparer))
{
return true;
}
}
return false;
}
}
return false;
}
internal virtual void AddHeaders(HttpHeaders sourceHeaders)
{
Debug.Assert(sourceHeaders != null);
Debug.Assert(GetType() == sourceHeaders.GetType(), "Can only copy headers from an instance of the same type.");
// Only add header values if they're not already set on the message. Note that we don't merge
// collections: If both the default headers and the message have set some values for a certain
// header, then we don't try to merge the values.
if (_count == 0 && sourceHeaders._headerStore is HeaderEntry[] sourceEntries)
{
// If the target collection is empty, we don't have to search for existing values
_count = sourceHeaders._count;
if (_headerStore is not HeaderEntry[] entries || entries.Length < _count)
{
entries = new HeaderEntry[sourceEntries.Length];
_headerStore = entries;
}
for (int i = 0; i < _count && i < sourceEntries.Length; i++)
{
HeaderEntry entry = sourceEntries[i];
if (entry.Value is HeaderStoreItemInfo info)
{
entry.Value = CloneHeaderInfo(entry.Key, info);
}
entries[i] = entry;
}
}
else
{
foreach (HeaderEntry entry in sourceHeaders.GetEntries())
{
ref object? storeValueRef = ref GetValueRefOrAddDefault(entry.Key);
if (storeValueRef is null)
{
object sourceValue = entry.Value;
if (sourceValue is HeaderStoreItemInfo info)
{
storeValueRef = CloneHeaderInfo(entry.Key, info);
}
else
{
Debug.Assert(sourceValue is string);
storeValueRef = sourceValue;
}
}
}
}
}
private HeaderStoreItemInfo CloneHeaderInfo(HeaderDescriptor descriptor, HeaderStoreItemInfo sourceInfo)
{
var destinationInfo = new HeaderStoreItemInfo
{
// Always copy raw values
RawValue = CloneStringHeaderInfoValues(sourceInfo.RawValue)
};
if (descriptor.Parser == null)
{
// We have custom header values. The parsed values are strings.
// Custom header values are always stored as string or list of strings.
Debug.Assert(sourceInfo.InvalidValue == null, "No invalid values expected for custom headers.");
destinationInfo.ParsedValue = CloneStringHeaderInfoValues(sourceInfo.ParsedValue);
}
else
{
// We have a parser, so we also have to copy invalid values and clone parsed values.
// Invalid values are always strings. Strings are immutable. So we only have to clone the
// collection (if there is one).
destinationInfo.InvalidValue = CloneStringHeaderInfoValues(sourceInfo.InvalidValue);
// Now clone and add parsed values (if any).
if (sourceInfo.ParsedValue != null)
{
List<object>? sourceValues = sourceInfo.ParsedValue as List<object>;
if (sourceValues == null)
{
CloneAndAddValue(destinationInfo, sourceInfo.ParsedValue);
}
else
{
foreach (object item in sourceValues)
{
CloneAndAddValue(destinationInfo, item);
}
}
}
}
return destinationInfo;
}
private static void CloneAndAddValue(HeaderStoreItemInfo destinationInfo, object source)
{
// We only have one value. Clone it and assign it to the store.
if (source is ICloneable cloneableValue)
{
AddParsedValue(destinationInfo, cloneableValue.Clone());
}
else
{
// If it doesn't implement ICloneable, it's a value type or an immutable type like String/Uri.
AddParsedValue(destinationInfo, source);
}
}
[return: NotNullIfNotNull("source")]
private static object? CloneStringHeaderInfoValues(object? source)
{
if (source == null)
{
return null;
}
List<object>? sourceValues = source as List<object>;
if (sourceValues == null)
{
// If we just have one value, return the reference to the string (strings are immutable so it's OK
// to use the reference).
return source;
}
else
{
// If we have a list of strings, create a new list and copy all strings to the new list.
return new List<object>(sourceValues);
}
}
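// Gets the HeaderStoreItemInfo for the header (parsing any raw values first), or creates and adds
// an empty one if no valid entry exists yet.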
private HeaderStoreItemInfo GetOrCreateHeaderInfo(HeaderDescriptor descriptor)
{
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
return info;
}
else
{
return CreateAndAddHeaderToStore(descriptor);
}
}
private HeaderStoreItemInfo CreateAndAddHeaderToStore(HeaderDescriptor descriptor)
{
Debug.Assert(!ContainsKey(descriptor));
// If we don't have the header in the store yet, add it now.
HeaderStoreItemInfo result = new HeaderStoreItemInfo();
// If the descriptor header type is in _treatAsCustomHeaderTypes, it must be converted to a custom header before calling this method
Debug.Assert((descriptor.HeaderType & _treatAsCustomHeaderTypes) == 0);
AddEntryToStore(new HeaderEntry(descriptor, result));
return result;
}
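// Gets the raw store value for the header (either a string or a HeaderStoreItemInfo) without
// triggering any parsing.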
internal bool TryGetHeaderValue(HeaderDescriptor descriptor, [NotNullWhen(true)] out object? value)
{
ref object storeValueRef = ref GetValueRefOrNullRef(descriptor);
if (Unsafe.IsNullRef(ref storeValueRef))
{
value = null;
return false;
}
else
{
value = storeValueRef;
return true;
}
}
private bool TryGetAndParseHeaderInfo(HeaderDescriptor key, [NotNullWhen(true)] out HeaderStoreItemInfo? info)
{
ref object storeValueRef = ref GetValueRefOrNullRef(key);
if (!Unsafe.IsNullRef(ref storeValueRef))
{
object value = storeValueRef;
if (value is HeaderStoreItemInfo hsi)
{
info = hsi;
}
else
{
Debug.Assert(value is string);
storeValueRef = info = new HeaderStoreItemInfo() { RawValue = value };
}
return ParseRawHeaderValues(key, info);
}
info = null;
return false;
}
private bool ParseRawHeaderValues(HeaderDescriptor descriptor, HeaderStoreItemInfo info)
{
// Unlike TryGetHeaderValue(), this method tries to parse all non-validated header values (if any)
// before returning to the caller.
Debug.Assert(!info.IsEmpty);
if (info.RawValue != null)
{
List<string>? rawValues = info.RawValue as List<string>;
if (rawValues == null)
{
ParseSingleRawHeaderValue(descriptor, info);
}
else
{
ParseMultipleRawHeaderValues(descriptor, info, rawValues);
}
// At this point all values are either in info.ParsedValue, info.InvalidValue, or were removed since they
// contain newline chars. Reset RawValue.
info.RawValue = null;
// If during parsing all values were removed (because they contained newline chars), the header is
// now empty: remove it and return false.
if ((info.InvalidValue == null) && (info.ParsedValue == null))
{
// After parsing the raw value, no value is left because all values contain newline chars.
Debug.Assert(_count > 0);
Remove(descriptor);
return false;
}
}
return true;
}
private static void ParseMultipleRawHeaderValues(HeaderDescriptor descriptor, HeaderStoreItemInfo info, List<string> rawValues)
{
if (descriptor.Parser == null)
{
foreach (string rawValue in rawValues)
{
if (!ContainsNewLine(rawValue, descriptor))
{
AddParsedValue(info, rawValue);
}
}
}
else
{
foreach (string rawValue in rawValues)
{
if (!TryParseAndAddRawHeaderValue(descriptor, info, rawValue, true))
{
if (NetEventSource.Log.IsEnabled()) NetEventSource.Log.HeadersInvalidValue(descriptor.Name, rawValue);
}
}
}
}
private static void ParseSingleRawHeaderValue(HeaderDescriptor descriptor, HeaderStoreItemInfo info)
{
string? rawValue = info.RawValue as string;
Debug.Assert(rawValue != null, "RawValue must either be List<string> or string.");
if (descriptor.Parser == null)
{
if (!ContainsNewLine(rawValue, descriptor))
{
AddParsedValue(info, rawValue);
}
}
else
{
if (!TryParseAndAddRawHeaderValue(descriptor, info, rawValue, true))
{
if (NetEventSource.Log.IsEnabled()) NetEventSource.Log.HeadersInvalidValue(descriptor.Name, rawValue);
}
}
}
// See Add(name, string)
internal bool TryParseAndAddValue(HeaderDescriptor descriptor, string? value)
{
// We don't use GetOrCreateHeaderInfo() here, since this would create a new header in the store. If parsing
// the value then throws, we would have to remove the header from the store again. So just get a
// HeaderStoreItemInfo object and try to parse the value. If it works, we'll add the header.
HeaderStoreItemInfo info;
bool addToStore;
PrepareHeaderInfoForAdd(descriptor, out info, out addToStore);
bool result = TryParseAndAddRawHeaderValue(descriptor, info, value, false);
if (result && addToStore && (info.ParsedValue != null))
{
// If we get here, then the value could be parsed correctly. If we created a new HeaderStoreItemInfo, add
// it to the store if we added at least one value.
Debug.Assert(!ContainsKey(descriptor));
AddEntryToStore(new HeaderEntry(descriptor, info));
}
return result;
}
// See ParseAndAddValue
private static bool TryParseAndAddRawHeaderValue(HeaderDescriptor descriptor, HeaderStoreItemInfo info, string? value, bool addWhenInvalid)
{
Debug.Assert(info != null);
Debug.Assert(descriptor.Parser != null);
// Values are added as 'invalid' if we either can't parse the value OR if we already have a value
// and the current header doesn't support multiple values: e.g. trying to add a date/time value
// to the 'Date' header if we already have a date/time value will result in the second value being
// added to the 'invalid' header values.
if (!info.CanAddParsedValue(descriptor.Parser))
{
if (addWhenInvalid)
{
AddInvalidValue(info, value ?? string.Empty);
}
return false;
}
int index = 0;
if (descriptor.Parser.TryParseValue(value, info.ParsedValue, ref index, out object? parsedValue))
{
// The raw string only represented one value (which was successfully parsed). Add the value and return.
if ((value == null) || (index == value.Length))
{
if (parsedValue != null)
{
AddParsedValue(info, parsedValue);
}
return true;
}
Debug.Assert(index < value.Length, "Parser must return an index value within the string length.");
// If we successfully parsed a value, but there are more left to read, store the results in a temp
// list. Only when all values are parsed successfully write the list to the store.
List<object> parsedValues = new List<object>();
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
while (index < value.Length)
{
if (descriptor.Parser.TryParseValue(value, info.ParsedValue, ref index, out parsedValue))
{
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
}
else
{
if (!ContainsNewLine(value, descriptor) && addWhenInvalid)
{
AddInvalidValue(info, value);
}
return false;
}
}
// All values were parsed correctly. Copy results to the store.
foreach (object item in parsedValues)
{
AddParsedValue(info, item);
}
return true;
}
Debug.Assert(value != null);
if (!ContainsNewLine(value, descriptor) && addWhenInvalid)
{
AddInvalidValue(info, value ?? string.Empty);
}
return false;
}
private static void AddParsedValue(HeaderStoreItemInfo info, object value)
{
Debug.Assert(!(value is List<object>),
"Header value types must not derive from List<object> since this type is used internally to store " +
"lists of values. So we would not be able to distinguish between a single value and a list of values.");
AddValueToStoreValue<object>(value, ref info.ParsedValue);
}
private static void AddInvalidValue(HeaderStoreItemInfo info, string value)
{
AddValueToStoreValue<string>(value, ref info.InvalidValue);
}
private static void AddRawValue(HeaderStoreItemInfo info, string value)
{
AddValueToStoreValue<string>(value, ref info.RawValue);
}
private static void AddValueToStoreValue<T>(T value, ref object? currentStoreValue) where T : class
{
// If there is no value set yet, then add the current item as the value (we don't create a list
// if not required). If a value is already assigned, then make sure 'currentStoreValue' is a
// List<T> and append 'value' to the list.
if (currentStoreValue == null)
{
currentStoreValue = value;
}
else
{
List<T>? storeValues = currentStoreValue as List<T>;
if (storeValues == null)
{
storeValues = new List<T>(2);
Debug.Assert(currentStoreValue is T);
storeValues.Add((T)currentStoreValue);
currentStoreValue = storeValues;
}
Debug.Assert(value is T);
storeValues.Add((T)value);
}
}
// Since most of the time we just have 1 value, we don't create a List<object> for one value, but we change
// the return type to 'object'. The caller has to deal with the return type (object vs. List<object>). This
// is to optimize the most common scenario where a header has only one value.
internal object? GetParsedValues(HeaderDescriptor descriptor)
{
if (!TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
return null;
}
return info.ParsedValue;
}
internal virtual bool IsAllowedHeaderName(HeaderDescriptor descriptor) => true;
private void PrepareHeaderInfoForAdd(HeaderDescriptor descriptor, out HeaderStoreItemInfo info, out bool addToStore)
{
if (!IsAllowedHeaderName(descriptor))
{
throw new InvalidOperationException(SR.Format(SR.net_http_headers_not_allowed_header_name, descriptor.Name));
}
addToStore = false;
if (!TryGetAndParseHeaderInfo(descriptor, out info!))
{
info = new HeaderStoreItemInfo();
addToStore = true;
}
}
private void ParseAndAddValue(HeaderDescriptor descriptor, HeaderStoreItemInfo info, string? value)
{
Debug.Assert(info != null);
if (descriptor.Parser == null)
{
// If we don't have a parser for the header, we consider the value valid if it doesn't contain
// newline characters. We add the values as "parsed value". Note that we allow empty values.
CheckContainsNewLine(value);
AddParsedValue(info, value ?? string.Empty);
return;
}
// If the header only supports 1 value, we can add the current value only if there is no
// value already set.
if (!info.CanAddParsedValue(descriptor.Parser))
{
throw new FormatException(SR.Format(System.Globalization.CultureInfo.InvariantCulture, SR.net_http_headers_single_value_header, descriptor.Name));
}
int index = 0;
object parsedValue = descriptor.Parser.ParseValue(value, info.ParsedValue, ref index);
// The raw string only represented one value (which was successfully parsed). Add the value and return.
// If value is null we still have to first call ParseValue() to allow the parser to decide whether null is
// a valid value. If it is (i.e. no exception thrown), we set the parsed value (if any) and return.
if ((value == null) || (index == value.Length))
{
// If the returned value is null, then it means the header accepts empty values. i.e. we don't throw
// but we don't add 'null' to the store either.
if (parsedValue != null)
{
AddParsedValue(info, parsedValue);
}
return;
}
Debug.Assert(index < value.Length, "Parser must return an index value within the string length.");
// If we successfully parsed a value, but there are more left to read, store the results in a temp
// list. Only when all values are parsed successfully write the list to the store.
List<object> parsedValues = new List<object>();
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
while (index < value.Length)
{
parsedValue = descriptor.Parser.ParseValue(value, info.ParsedValue, ref index);
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
}
// All values were parsed correctly. Copy results to the store.
foreach (object item in parsedValues)
{
AddParsedValue(info, item);
}
}
private HeaderDescriptor GetHeaderDescriptor(string name)
{
if (string.IsNullOrEmpty(name))
{
throw new ArgumentException(SR.net_http_argument_empty_string, nameof(name));
}
if (!HeaderDescriptor.TryGet(name, out HeaderDescriptor descriptor))
{
throw new FormatException(SR.net_http_headers_invalid_header_name);
}
if ((descriptor.HeaderType & _allowedHeaderTypes) != 0)
{
return descriptor;
}
else if ((descriptor.HeaderType & _treatAsCustomHeaderTypes) != 0)
{
return descriptor.AsCustomHeader();
}
throw new InvalidOperationException(SR.Format(SR.net_http_headers_not_allowed_header_name, name));
}
internal bool TryGetHeaderDescriptor(string name, out HeaderDescriptor descriptor)
{
if (string.IsNullOrEmpty(name))
{
descriptor = default;
return false;
}
if (HeaderDescriptor.TryGet(name, out descriptor))
{
HttpHeaderType headerType = descriptor.HeaderType;
if ((headerType & _allowedHeaderTypes) != 0)
{
return true;
}
if ((headerType & _treatAsCustomHeaderTypes) != 0)
{
descriptor = descriptor.AsCustomHeader();
return true;
}
}
return false;
}
internal static void CheckContainsNewLine(string? value)
{
if (value == null)
{
return;
}
if (HttpRuleParser.ContainsNewLine(value))
{
throw new FormatException(SR.net_http_headers_no_newlines);
}
}
private static bool ContainsNewLine(string value, HeaderDescriptor descriptor)
{
if (HttpRuleParser.ContainsNewLine(value))
{
if (NetEventSource.Log.IsEnabled()) NetEventSource.Error(null, SR.Format(SR.net_http_log_headers_no_newlines, descriptor.Name, value));
return true;
}
return false;
}
internal static string[] GetStoreValuesAsStringArray(HeaderDescriptor descriptor, HeaderStoreItemInfo info)
{
GetStoreValuesAsStringOrStringArray(descriptor, info, out string? singleValue, out string[]? multiValue);
Debug.Assert(singleValue is not null ^ multiValue is not null);
return multiValue ?? new[] { singleValue! };
}
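// Materializes the header's values as strings: for the common single-value case 'singleValue' is set,
// otherwise 'multiValue' receives an array containing the raw, parsed and invalid values (in that order).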
internal static void GetStoreValuesAsStringOrStringArray(HeaderDescriptor descriptor, object sourceValues, out string? singleValue, out string[]? multiValue)
{
HeaderStoreItemInfo? info = sourceValues as HeaderStoreItemInfo;
if (info is null)
{
Debug.Assert(sourceValues is string);
singleValue = (string)sourceValues;
multiValue = null;
return;
}
int length = GetValueCount(info);
Span<string?> values;
singleValue = null;
if (length == 1)
{
multiValue = null;
values = MemoryMarshal.CreateSpan(ref singleValue, 1);
}
else
{
Debug.Assert(length > 1, "The header should have been removed when it became empty");
values = multiValue = new string[length];
}
int currentIndex = 0;
ReadStoreValues<string?>(values, info.RawValue, null, ref currentIndex);
ReadStoreValues<object?>(values, info.ParsedValue, descriptor.Parser, ref currentIndex);
ReadStoreValues<string?>(values, info.InvalidValue, null, ref currentIndex);
Debug.Assert(currentIndex == length);
}
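// Same as GetStoreValuesAsStringOrStringArray, but writes the values into a caller-provided array
// (reallocating it if it is too small) and returns the number of values written.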
internal static int GetStoreValuesIntoStringArray(HeaderDescriptor descriptor, object sourceValues, [NotNull] ref string[]? values)
{
values ??= Array.Empty<string>();
HeaderStoreItemInfo? info = sourceValues as HeaderStoreItemInfo;
if (info is null)
{
Debug.Assert(sourceValues is string);
if (values.Length == 0)
{
values = new string[1];
}
values[0] = (string)sourceValues;
return 1;
}
int length = GetValueCount(info);
if (length > 0)
{
if (values.Length < length)
{
values = new string[length];
}
int currentIndex = 0;
ReadStoreValues<string?>(values, info.RawValue, null, ref currentIndex);
ReadStoreValues<object?>(values, info.ParsedValue, descriptor.Parser, ref currentIndex);
ReadStoreValues<string?>(values, info.InvalidValue, null, ref currentIndex);
Debug.Assert(currentIndex == length);
}
return length;
}
private static int GetValueCount(HeaderStoreItemInfo info)
{
Debug.Assert(info != null);
int valueCount = Count<string>(info.RawValue);
valueCount += Count<string>(info.InvalidValue);
valueCount += Count<object>(info.ParsedValue);
return valueCount;
static int Count<T>(object? valueStore) =>
valueStore is null ? 0 :
valueStore is List<T> list ? list.Count :
1;
}
private static void ReadStoreValues<T>(Span<string?> values, object? storeValue, HttpHeaderParser? parser, ref int currentIndex)
{
if (storeValue != null)
{
List<T>? storeValues = storeValue as List<T>;
if (storeValues == null)
{
values[currentIndex] = parser == null ? storeValue.ToString() : parser.ToString(storeValue);
currentIndex++;
}
else
{
foreach (object? item in storeValues)
{
Debug.Assert(item != null);
values[currentIndex] = parser == null ? item.ToString() : parser.ToString(item);
currentIndex++;
}
}
}
}
private bool AreEqual(object value, object? storeValue, IEqualityComparer? comparer)
{
Debug.Assert(value != null);
if (comparer != null)
{
return comparer.Equals(value, storeValue);
}
// We don't have a comparer, so use the Equals() method.
return value.Equals(storeValue);
}
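// Per-header state. Each field is either null, a single value, or a List<T> of values:
// RawValue holds unparsed strings, InvalidValue holds strings that failed parsing, and
// ParsedValue holds successfully parsed (strongly typed) values.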
internal sealed class HeaderStoreItemInfo
{
internal HeaderStoreItemInfo() { }
internal object? RawValue;
internal object? InvalidValue;
internal object? ParsedValue;
internal bool CanAddParsedValue(HttpHeaderParser parser)
{
Debug.Assert(parser != null, "There should be no reason to call CanAddParsedValue if there is no parser for the current header.");
// If the header only supports one value, and we already have a value set, then we can't add
// another value. E.g. the 'Date' header only supports one value. We can't add multiple timestamps
// to 'Date'.
// So if this is a known header, ask the parser if it supports multiple values and check whether
// we already have a (valid or invalid) value.
// Note that we ignore the raw value on purpose: E.g. we are parsing 2 raw values for a header only
// supporting 1 value. When the first value gets parsed, CanAddParsedValue returns true and we add the
// parsed value to ParsedValue. When the second value is parsed, CanAddParsedValue returns false, because
// we already have a parsed value.
return parser.SupportsMultipleValues || ((InvalidValue == null) && (ParsedValue == null));
}
internal bool IsEmpty => (RawValue == null) && (InvalidValue == null) && (ParsedValue == null);
}
#region Low-level implementation details that work with _headerStore directly
// Used to store the CollectionsMarshal.GetValueRefOrAddDefault out parameter.
// This is a workaround for the Roslyn bug where we can't use a discard instead:
// https://github.com/dotnet/roslyn/issues/56587#issuecomment-934955526
private static bool s_dictionaryGetValueRefOrAddDefaultExistsDummy;
private const int InitialCapacity = 4;
internal const int ArrayThreshold = 64; // Above this threshold, header ordering will not be preserved
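// Storage strategy: _headerStore starts out null, becomes a HeaderEntry[] of InitialCapacity entries
// that doubles on demand, and is converted to a Dictionary<HeaderDescriptor, object> once it would
// grow beyond ArrayThreshold. The array form preserves insertion order; the dictionary form does not.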
internal HeaderEntry[]? GetEntriesArray()
{
object? store = _headerStore;
if (store is null)
{
return null;
}
else if (store is HeaderEntry[] entries)
{
return entries;
}
else
{
return GetEntriesFromDictionary();
}
HeaderEntry[] GetEntriesFromDictionary()
{
var dictionary = (Dictionary<HeaderDescriptor, object>)_headerStore!;
var entries = new HeaderEntry[dictionary.Count];
int i = 0;
foreach (KeyValuePair<HeaderDescriptor, object> entry in dictionary)
{
entries[i++] = new HeaderEntry
{
Key = entry.Key,
Value = entry.Value
};
}
return entries;
}
}
internal ReadOnlySpan<HeaderEntry> GetEntries()
{
return new ReadOnlySpan<HeaderEntry>(GetEntriesArray(), 0, _count);
}
internal int Count => _count;
private bool EntriesAreLiveView => _headerStore is HeaderEntry[];
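// Returns a ref to the stored value for 'key', or a null ref (check with Unsafe.IsNullRef) if the
// header is not present.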
private ref object GetValueRefOrNullRef(HeaderDescriptor key)
{
ref object valueRef = ref Unsafe.NullRef<object>();
object? store = _headerStore;
if (store is HeaderEntry[] entries)
{
for (int i = 0; i < _count && i < entries.Length; i++)
{
if (key.Equals(entries[i].Key))
{
valueRef = ref entries[i].Value;
break;
}
}
}
else if (store is not null)
{
valueRef = ref CollectionsMarshal.GetValueRefOrNullRef(Unsafe.As<Dictionary<HeaderDescriptor, object>>(store), key);
}
return ref valueRef;
}
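// Returns a ref to the value slot for 'key', adding a new default entry first (growing the array or
// converting the store to a dictionary as needed) if the header is not present yet.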
private ref object? GetValueRefOrAddDefault(HeaderDescriptor key)
{
object? store = _headerStore;
if (store is HeaderEntry[] entries)
{
for (int i = 0; i < _count && i < entries.Length; i++)
{
if (key.Equals(entries[i].Key))
{
return ref entries[i].Value!;
}
}
int count = _count;
_count++;
if ((uint)count < (uint)entries.Length)
{
entries[count].Key = key;
return ref entries[count].Value!;
}
return ref GrowEntriesAndAddDefault(key);
}
else if (store is null)
{
_count++;
entries = new HeaderEntry[InitialCapacity];
_headerStore = entries;
ref HeaderEntry firstEntry = ref MemoryMarshal.GetArrayDataReference(entries);
firstEntry.Key = key;
return ref firstEntry.Value!;
}
else
{
return ref DictionaryGetValueRefOrAddDefault(key);
}
ref object? GrowEntriesAndAddDefault(HeaderDescriptor key)
{
var entries = (HeaderEntry[])_headerStore!;
if (entries.Length == ArrayThreshold)
{
return ref ConvertToDictionaryAndAddDefault(key);
}
else
{
Array.Resize(ref entries, entries.Length << 1);
_headerStore = entries;
ref HeaderEntry firstNewEntry = ref entries[entries.Length >> 1];
firstNewEntry.Key = key;
return ref firstNewEntry.Value!;
}
}
ref object? ConvertToDictionaryAndAddDefault(HeaderDescriptor key)
{
var entries = (HeaderEntry[])_headerStore!;
var dictionary = new Dictionary<HeaderDescriptor, object>(ArrayThreshold);
_headerStore = dictionary;
foreach (HeaderEntry entry in entries)
{
dictionary.Add(entry.Key, entry.Value);
}
Debug.Assert(dictionary.Count == _count - 1);
return ref CollectionsMarshal.GetValueRefOrAddDefault(dictionary, key, out s_dictionaryGetValueRefOrAddDefaultExistsDummy);
}
ref object? DictionaryGetValueRefOrAddDefault(HeaderDescriptor key)
{
var dictionary = (Dictionary<HeaderDescriptor, object>)_headerStore!;
ref object? value = ref CollectionsMarshal.GetValueRefOrAddDefault(dictionary, key, out s_dictionaryGetValueRefOrAddDefaultExistsDummy);
if (value is null)
{
_count++;
}
return ref value;
}
}
private void AddEntryToStore(HeaderEntry entry)
{
Debug.Assert(!ContainsKey(entry.Key));
if (_headerStore is HeaderEntry[] entries)
{
int count = _count;
if ((uint)count < (uint)entries.Length)
{
entries[count] = entry;
_count++;
return;
}
}
GetValueRefOrAddDefault(entry.Key) = entry.Value;
}
internal bool ContainsKey(HeaderDescriptor key)
{
return !Unsafe.IsNullRef(ref GetValueRefOrNullRef(key));
}
public void Clear()
{
if (_headerStore is HeaderEntry[] entries)
{
Array.Clear(entries, 0, _count);
}
else
{
_headerStore = null;
}
_count = 0;
}
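// Removes the entry for 'key'. In the array form the remaining entries are shifted down so that
// insertion order is preserved.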
internal bool Remove(HeaderDescriptor key)
{
bool removed = false;
object? store = _headerStore;
if (store is HeaderEntry[] entries)
{
for (int i = 0; i < _count && i < entries.Length; i++)
{
if (key.Equals(entries[i].Key))
{
while (i + 1 < _count && (uint)(i + 1) < (uint)entries.Length)
{
entries[i] = entries[i + 1];
i++;
}
entries[i] = default;
removed = true;
break;
}
}
}
else if (store is not null)
{
removed = Unsafe.As<Dictionary<HeaderDescriptor, object>>(store).Remove(key);
}
if (removed)
{
_count--;
}
return removed;
}
#endregion // _headerStore implementation
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
namespace System.Net.Http.Headers
{
/// <summary>
/// Key/value pairs of headers. The value is either a raw <see cref="string"/> or a <see cref="HttpHeaders.HeaderStoreItemInfo"/>.
/// We're using a custom type instead of <see cref="KeyValuePair{TKey, TValue}"/> because we need ref access to fields.
/// </summary>
internal struct HeaderEntry
{
public HeaderDescriptor Key;
public object Value;
public HeaderEntry(HeaderDescriptor key, object value)
{
Key = key;
Value = value;
}
}
public abstract class HttpHeaders : IEnumerable<KeyValuePair<string, IEnumerable<string>>>
{
// This type is used to store a collection of headers in 'headerStore':
// - A header can have multiple values.
// - A header can have an associated parser which is able to parse the raw string value into a strongly typed object.
// - If a header has an associated parser and the provided raw value can't be parsed, the value is considered
// invalid. Invalid values are stored if added using TryAddWithoutValidation(). If the value was added using Add(),
// Add() will throw FormatException.
// - Since parsing header values is expensive and users usually only care about a few headers, header values are
// lazily initialized.
//
// Given the properties above, a header value can have three states:
// - 'raw': The header value was added using TryAddWithoutValidation() and it wasn't parsed yet.
// - 'parsed': The header value was successfully parsed. It was either added using Add() where the value was parsed
// immediately, or if added using TryAddWithoutValidation() a user already accessed a property/method triggering the
// value to be parsed.
// - 'invalid': The header value was parsed, but parsing failed because the value is invalid. Storing invalid values
// allows users to still retrieve the value (by calling GetValues()), but it will not be exposed as strongly typed
// object. E.g. the client receives a response with the following header: 'Via: 1.1 proxy, invalid'
// - HttpHeaders.GetValues() will return "1.1 proxy", "invalid"
// - HttpResponseHeaders.Via collection will only contain one ViaHeaderValue object with value "1.1 proxy"
/// <summary>Either a <see cref="HeaderEntry"/> array or a Dictionary<<see cref="HeaderDescriptor"/>, <see cref="object"/>> </summary>
private object? _headerStore;
private int _count;
private readonly HttpHeaderType _allowedHeaderTypes;
private readonly HttpHeaderType _treatAsCustomHeaderTypes;
protected HttpHeaders()
: this(HttpHeaderType.All, HttpHeaderType.None)
{
}
internal HttpHeaders(HttpHeaderType allowedHeaderTypes, HttpHeaderType treatAsCustomHeaderTypes)
{
// Should be no overlap
Debug.Assert((allowedHeaderTypes & treatAsCustomHeaderTypes) == 0);
_allowedHeaderTypes = allowedHeaderTypes & ~HttpHeaderType.NonTrailing;
_treatAsCustomHeaderTypes = treatAsCustomHeaderTypes & ~HttpHeaderType.NonTrailing;
}
/// <summary>Gets a view of the contents of this headers collection that does not parse nor validate the data upon access.</summary>
public HttpHeadersNonValidated NonValidated => new HttpHeadersNonValidated(this);
public void Add(string name, string? value) => Add(GetHeaderDescriptor(name), value);
internal void Add(HeaderDescriptor descriptor, string? value)
{
// We don't use GetOrCreateHeaderInfo() here, since this would create a new header in the store. If parsing
// the value then throws, we would have to remove the header from the store again. So just get a
// HeaderStoreItemInfo object and try to parse the value. If it works, we'll add the header.
PrepareHeaderInfoForAdd(descriptor, out HeaderStoreItemInfo info, out bool addToStore);
ParseAndAddValue(descriptor, info, value);
// If we get here, then the value could be parsed correctly. If we created a new HeaderStoreItemInfo, add
// it to the store if we added at least one value.
if (addToStore && (info.ParsedValue != null))
{
Debug.Assert(!ContainsKey(descriptor));
AddEntryToStore(new HeaderEntry(descriptor, info));
}
}
public void Add(string name, IEnumerable<string?> values) => Add(GetHeaderDescriptor(name), values);
internal void Add(HeaderDescriptor descriptor, IEnumerable<string?> values!!)
{
PrepareHeaderInfoForAdd(descriptor, out HeaderStoreItemInfo info, out bool addToStore);
try
{
// Note that if the first couple of values are valid followed by an invalid value, the valid values
// will be added to the store before the exception for the invalid value is thrown.
foreach (string? value in values)
{
ParseAndAddValue(descriptor, info, value);
}
}
finally
{
// Even if one of the values was invalid, make sure we add the header for the valid ones. We need to be
// consistent here: If values get added to an _existing_ header, then all values until the invalid one
// get added. Same here: If multiple values get added to a _new_ header, make sure the header gets added
// with the valid values.
// However, if all values for a _new_ header were invalid, then don't add the header.
if (addToStore && (info.ParsedValue != null))
{
Debug.Assert(!ContainsKey(descriptor));
AddEntryToStore(new HeaderEntry(descriptor, info));
}
}
}
public bool TryAddWithoutValidation(string name, string? value) =>
TryGetHeaderDescriptor(name, out HeaderDescriptor descriptor) &&
TryAddWithoutValidation(descriptor, value);
internal bool TryAddWithoutValidation(HeaderDescriptor descriptor, string? value)
{
// Normalize null values to be empty values, which are allowed. If the user adds multiple
// null/empty values, all of them are added to the collection. This will result in delimiter-only
// values, e.g. adding two null-strings (or empty, or whitespace-only) results in "My-Header: ,".
value ??= string.Empty;
ref object? storeValueRef = ref GetValueRefOrAddDefault(descriptor);
object? currentValue = storeValueRef;
if (currentValue is null)
{
storeValueRef = value;
}
else
{
if (currentValue is not HeaderStoreItemInfo info)
{
// The header store contained a single raw string value, so promote it
// to being a HeaderStoreItemInfo and add to it.
Debug.Assert(currentValue is string);
storeValueRef = info = new HeaderStoreItemInfo() { RawValue = currentValue };
}
AddRawValue(info, value);
}
return true;
}
public bool TryAddWithoutValidation(string name, IEnumerable<string?> values) =>
TryGetHeaderDescriptor(name, out HeaderDescriptor descriptor) &&
TryAddWithoutValidation(descriptor, values);
internal bool TryAddWithoutValidation(HeaderDescriptor descriptor, IEnumerable<string?> values!!)
{
using IEnumerator<string?> enumerator = values.GetEnumerator();
if (enumerator.MoveNext())
{
TryAddWithoutValidation(descriptor, enumerator.Current);
if (enumerator.MoveNext())
{
ref object? storeValueRef = ref GetValueRefOrAddDefault(descriptor);
Debug.Assert(storeValueRef is not null);
object value = storeValueRef;
if (value is not HeaderStoreItemInfo info)
{
Debug.Assert(value is string);
storeValueRef = info = new HeaderStoreItemInfo { RawValue = value };
}
do
{
AddRawValue(info, enumerator.Current ?? string.Empty);
}
while (enumerator.MoveNext());
}
}
return true;
}
public IEnumerable<string> GetValues(string name) => GetValues(GetHeaderDescriptor(name));
internal IEnumerable<string> GetValues(HeaderDescriptor descriptor)
{
if (TryGetValues(descriptor, out IEnumerable<string>? values))
{
return values;
}
throw new InvalidOperationException(SR.net_http_headers_not_found);
}
public bool TryGetValues(string name, [NotNullWhen(true)] out IEnumerable<string>? values)
{
if (TryGetHeaderDescriptor(name, out HeaderDescriptor descriptor))
{
return TryGetValues(descriptor, out values);
}
values = null;
return false;
}
internal bool TryGetValues(HeaderDescriptor descriptor, [NotNullWhen(true)] out IEnumerable<string>? values)
{
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
values = GetStoreValuesAsStringArray(descriptor, info);
return true;
}
values = null;
return false;
}
public bool Contains(string name) => Contains(GetHeaderDescriptor(name));
internal bool Contains(HeaderDescriptor descriptor)
{
// We can't just call headerStore.ContainsKey() since after parsing the value the header may not exist
// anymore (if the value contains newline chars, we remove the header). So try to parse the
// header value.
return TryGetAndParseHeaderInfo(descriptor, out _);
}
public override string ToString()
{
// Return all headers as string similar to:
// HeaderName1: Value1, Value2
// HeaderName2: Value1
// ...
var vsb = new ValueStringBuilder(stackalloc char[512]);
foreach (HeaderEntry entry in GetEntries())
{
vsb.Append(entry.Key.Name);
vsb.Append(": ");
GetStoreValuesAsStringOrStringArray(entry.Key, entry.Value, out string? singleValue, out string[]? multiValue);
Debug.Assert(singleValue is not null ^ multiValue is not null);
if (singleValue is not null)
{
vsb.Append(singleValue);
}
else
{
// Note that if we get multiple values for a header that doesn't support multiple values, we'll
// just separate the values using a comma (default separator).
string? separator = entry.Key.Parser is HttpHeaderParser parser && parser.SupportsMultipleValues ? parser.Separator : HttpHeaderParser.DefaultSeparator;
Debug.Assert(multiValue is not null && multiValue.Length > 0);
vsb.Append(multiValue[0]);
for (int i = 1; i < multiValue.Length; i++)
{
vsb.Append(separator);
vsb.Append(multiValue[i]);
}
}
vsb.Append(Environment.NewLine);
}
return vsb.ToString();
}
internal string GetHeaderString(HeaderDescriptor descriptor)
{
if (TryGetHeaderValue(descriptor, out object? info))
{
GetStoreValuesAsStringOrStringArray(descriptor, info, out string? singleValue, out string[]? multiValue);
Debug.Assert(singleValue is not null ^ multiValue is not null);
if (singleValue is not null)
{
return singleValue;
}
// Note that if we get multiple values for a header that doesn't support multiple values, we'll
// just separate the values using a comma (default separator).
string? separator = descriptor.Parser != null && descriptor.Parser.SupportsMultipleValues ? descriptor.Parser.Separator : HttpHeaderParser.DefaultSeparator;
return string.Join(separator, multiValue!);
}
return string.Empty;
}
#region IEnumerable<KeyValuePair<string, IEnumerable<string>>> Members
public IEnumerator<KeyValuePair<string, IEnumerable<string>>> GetEnumerator() => _count == 0 ?
((IEnumerable<KeyValuePair<string, IEnumerable<string>>>)Array.Empty<KeyValuePair<string, IEnumerable<string>>>()).GetEnumerator() :
GetEnumeratorCore();
private IEnumerator<KeyValuePair<string, IEnumerable<string>>> GetEnumeratorCore()
{
HeaderEntry[]? entries = GetEntriesArray();
Debug.Assert(_count != 0 && entries is not null, "Caller should have validated the collection is not empty");
int count = _count;
for (int i = 0; i < count; i++)
{
HeaderEntry entry = entries[i];
if (entry.Value is not HeaderStoreItemInfo info)
{
// To retain consistent semantics, we need to upgrade a raw string to a HeaderStoreItemInfo
// during enumeration so that we can parse the raw value in order to a) return
// the correct set of parsed values, and b) update the instance for subsequent enumerations
// to reflect that parsing.
info = new HeaderStoreItemInfo() { RawValue = entry.Value };
if (EntriesAreLiveView)
{
entries[i].Value = info;
}
else
{
Debug.Assert(ContainsKey(entry.Key));
((Dictionary<HeaderDescriptor, object>)_headerStore!)[entry.Key] = info;
}
}
// Make sure we parse all raw values before returning the result. Note that this has to be
// done before we calculate the array length (next line): A raw value may contain a list of
// values.
if (!ParseRawHeaderValues(entry.Key, info))
{
// We saw an invalid header value (contains newline chars) and deleted it.
// If the HeaderEntry[] we are enumerating is the live header store, the entries have shifted.
if (EntriesAreLiveView)
{
i--;
count--;
}
}
else
{
string[] values = GetStoreValuesAsStringArray(entry.Key, info);
yield return new KeyValuePair<string, IEnumerable<string>>(entry.Key.Name, values);
}
}
}
#endregion
#region IEnumerable Members
Collections.IEnumerator Collections.IEnumerable.GetEnumerator() => GetEnumerator();
#endregion
internal void AddParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
Debug.Assert(descriptor.Parser != null, "Can't add parsed value if there is no parser available.");
HeaderStoreItemInfo info = GetOrCreateHeaderInfo(descriptor);
// If the current header has only one value, we can't add another value. The strongly typed property
// must not call AddParsedValue(), but SetParsedValue(). E.g. for headers like 'Date', 'Host'.
Debug.Assert(descriptor.Parser.SupportsMultipleValues, $"Header '{descriptor.Name}' doesn't support multiple values");
AddParsedValue(info, value);
}
internal void SetParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
Debug.Assert(descriptor.Parser != null, "Can't add parsed value if there is no parser available.");
// This method will first clear all values. This is used e.g. when setting the 'Date' or 'Host' header.
// i.e. headers not supporting collections.
HeaderStoreItemInfo info = GetOrCreateHeaderInfo(descriptor);
info.InvalidValue = null;
info.ParsedValue = null;
info.RawValue = null;
AddParsedValue(info, value);
}
internal void SetOrRemoveParsedValue(HeaderDescriptor descriptor, object? value)
{
if (value == null)
{
Remove(descriptor);
}
else
{
SetParsedValue(descriptor, value);
}
}
public bool Remove(string name) => Remove(GetHeaderDescriptor(name));
internal bool RemoveParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
// If we have a value for this header, then verify if we have a single value. If so, compare that
// value with 'item'. If we have a list of values, then remove 'item' from the list.
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
Debug.Assert(descriptor.Parser != null, "Can't add parsed value if there is no parser available.");
Debug.Assert(descriptor.Parser.SupportsMultipleValues,
"This method should not be used for single-value headers. Use Remove(string) instead.");
// If there is no entry, just return.
if (info.ParsedValue == null)
{
return false;
}
bool result = false;
IEqualityComparer? comparer = descriptor.Parser.Comparer;
List<object>? parsedValues = info.ParsedValue as List<object>;
if (parsedValues == null)
{
Debug.Assert(info.ParsedValue.GetType() == value.GetType(),
"Stored value does not have the same type as 'value'.");
if (AreEqual(value, info.ParsedValue, comparer))
{
info.ParsedValue = null;
result = true;
}
}
else
{
foreach (object item in parsedValues)
{
Debug.Assert(item.GetType() == value.GetType(),
"One of the stored values does not have the same type as 'value'.");
if (AreEqual(value, item, comparer))
{
// Remove 'item' rather than 'value', since the 'comparer' may consider two values
// equal even though the default obj.Equals() may not (e.g. if 'comparer' does
// case-insensitive comparison for strings, but string.Equals() is case-sensitive).
result = parsedValues.Remove(item);
break;
}
}
// If we removed the last item in a list, remove the list.
if (parsedValues.Count == 0)
{
info.ParsedValue = null;
}
}
// If there is no value for the header left, remove the header.
if (info.IsEmpty)
{
bool headerRemoved = Remove(descriptor);
Debug.Assert(headerRemoved, $"Existing header '{descriptor.Name}' couldn't be removed.");
}
return result;
}
return false;
}
internal bool ContainsParsedValue(HeaderDescriptor descriptor, object value)
{
Debug.Assert(value != null);
// If we have a value for this header, then verify if we have a single value. If so, compare that
// value with 'item'. If we have a list of values, then compare each item in the list with 'item'.
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
Debug.Assert(descriptor.Parser != null, "Can't add parsed value if there is no parser available.");
Debug.Assert(descriptor.Parser.SupportsMultipleValues,
"This method should not be used for single-value headers. Use equality comparer instead.");
// If there is no entry, just return.
if (info.ParsedValue == null)
{
return false;
}
List<object>? parsedValues = info.ParsedValue as List<object>;
IEqualityComparer? comparer = descriptor.Parser.Comparer;
if (parsedValues == null)
{
Debug.Assert(info.ParsedValue.GetType() == value.GetType(),
"Stored value does not have the same type as 'value'.");
return AreEqual(value, info.ParsedValue, comparer);
}
else
{
foreach (object item in parsedValues)
{
Debug.Assert(item.GetType() == value.GetType(),
"One of the stored values does not have the same type as 'value'.");
if (AreEqual(value, item, comparer))
{
return true;
}
}
return false;
}
}
return false;
}
internal virtual void AddHeaders(HttpHeaders sourceHeaders)
{
Debug.Assert(sourceHeaders != null);
Debug.Assert(GetType() == sourceHeaders.GetType(), "Can only copy headers from an instance of the same type.");
// Only add header values if they're not already set on the message. Note that we don't merge
// collections: If both the default headers and the message have set some values for a certain
// header, then we don't try to merge the values.
if (_count == 0 && sourceHeaders._headerStore is HeaderEntry[] sourceEntries)
{
// If the target collection is empty, we don't have to search for existing values
_count = sourceHeaders._count;
if (_headerStore is not HeaderEntry[] entries || entries.Length < _count)
{
entries = new HeaderEntry[sourceEntries.Length];
_headerStore = entries;
}
for (int i = 0; i < _count && i < sourceEntries.Length; i++)
{
HeaderEntry entry = sourceEntries[i];
if (entry.Value is HeaderStoreItemInfo info)
{
entry.Value = CloneHeaderInfo(entry.Key, info);
}
entries[i] = entry;
}
}
else
{
foreach (HeaderEntry entry in sourceHeaders.GetEntries())
{
ref object? storeValueRef = ref GetValueRefOrAddDefault(entry.Key);
if (storeValueRef is null)
{
object sourceValue = entry.Value;
if (sourceValue is HeaderStoreItemInfo info)
{
storeValueRef = CloneHeaderInfo(entry.Key, info);
}
else
{
Debug.Assert(sourceValue is string);
storeValueRef = sourceValue;
}
}
}
}
}
private HeaderStoreItemInfo CloneHeaderInfo(HeaderDescriptor descriptor, HeaderStoreItemInfo sourceInfo)
{
var destinationInfo = new HeaderStoreItemInfo
{
// Always copy raw values
RawValue = CloneStringHeaderInfoValues(sourceInfo.RawValue)
};
if (descriptor.Parser == null)
{
// We have custom header values. The parsed values are strings.
// Custom header values are always stored as string or list of strings.
Debug.Assert(sourceInfo.InvalidValue == null, "No invalid values expected for custom headers.");
destinationInfo.ParsedValue = CloneStringHeaderInfoValues(sourceInfo.ParsedValue);
}
else
{
// We have a parser, so we also have to copy invalid values and clone parsed values.
// Invalid values are always strings. Strings are immutable. So we only have to clone the
// collection (if there is one).
destinationInfo.InvalidValue = CloneStringHeaderInfoValues(sourceInfo.InvalidValue);
// Now clone and add parsed values (if any).
if (sourceInfo.ParsedValue != null)
{
List<object>? sourceValues = sourceInfo.ParsedValue as List<object>;
if (sourceValues == null)
{
CloneAndAddValue(destinationInfo, sourceInfo.ParsedValue);
}
else
{
foreach (object item in sourceValues)
{
CloneAndAddValue(destinationInfo, item);
}
}
}
}
return destinationInfo;
}
private static void CloneAndAddValue(HeaderStoreItemInfo destinationInfo, object source)
{
// We only have one value. Clone it and assign it to the store.
if (source is ICloneable cloneableValue)
{
AddParsedValue(destinationInfo, cloneableValue.Clone());
}
else
{
// If it doesn't implement ICloneable, it's a value type or an immutable type like String/Uri.
AddParsedValue(destinationInfo, source);
}
}
[return: NotNullIfNotNull("source")]
private static object? CloneStringHeaderInfoValues(object? source)
{
if (source == null)
{
return null;
}
List<object>? sourceValues = source as List<object>;
if (sourceValues == null)
{
// If we just have one value, return the reference to the string (strings are immutable so it's OK
// to use the reference).
return source;
}
else
{
// If we have a list of strings, create a new list and copy all strings to the new list.
return new List<object>(sourceValues);
}
}
private HeaderStoreItemInfo GetOrCreateHeaderInfo(HeaderDescriptor descriptor)
{
if (TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
return info;
}
else
{
return CreateAndAddHeaderToStore(descriptor);
}
}
private HeaderStoreItemInfo CreateAndAddHeaderToStore(HeaderDescriptor descriptor)
{
Debug.Assert(!ContainsKey(descriptor));
// If we don't have the header in the store yet, add it now.
HeaderStoreItemInfo result = new HeaderStoreItemInfo();
// If the descriptor header type is in _treatAsCustomHeaderTypes, it must be converted to a custom header before calling this method
Debug.Assert((descriptor.HeaderType & _treatAsCustomHeaderTypes) == 0);
AddEntryToStore(new HeaderEntry(descriptor, result));
return result;
}
internal bool TryGetHeaderValue(HeaderDescriptor descriptor, [NotNullWhen(true)] out object? value)
{
ref object storeValueRef = ref GetValueRefOrNullRef(descriptor);
if (Unsafe.IsNullRef(ref storeValueRef))
{
value = null;
return false;
}
else
{
value = storeValueRef;
return true;
}
}
private bool TryGetAndParseHeaderInfo(HeaderDescriptor key, [NotNullWhen(true)] out HeaderStoreItemInfo? info)
{
ref object storeValueRef = ref GetValueRefOrNullRef(key);
if (!Unsafe.IsNullRef(ref storeValueRef))
{
object value = storeValueRef;
if (value is HeaderStoreItemInfo hsi)
{
info = hsi;
}
else
{
Debug.Assert(value is string);
storeValueRef = info = new HeaderStoreItemInfo() { RawValue = value };
}
return ParseRawHeaderValues(key, info);
}
info = null;
return false;
}
private bool ParseRawHeaderValues(HeaderDescriptor descriptor, HeaderStoreItemInfo info)
{
// Unlike TryGetHeaderInfo() this method tries to parse all non-validated header values (if any)
// before returning to the caller.
Debug.Assert(!info.IsEmpty);
if (info.RawValue != null)
{
List<string>? rawValues = info.RawValue as List<string>;
if (rawValues == null)
{
ParseSingleRawHeaderValue(descriptor, info);
}
else
{
ParseMultipleRawHeaderValues(descriptor, info, rawValues);
}
// At this point all values are either in info.ParsedValue, info.InvalidValue, or were removed since they
// contain newline chars. Reset RawValue.
info.RawValue = null;
// During parsing, we removed the value since it contains newline chars. Return false to indicate that
// this is an empty header.
if ((info.InvalidValue == null) && (info.ParsedValue == null))
{
// After parsing the raw value, no value is left because all values contain newline chars.
Debug.Assert(_count > 0);
Remove(descriptor);
return false;
}
}
return true;
}
private static void ParseMultipleRawHeaderValues(HeaderDescriptor descriptor, HeaderStoreItemInfo info, List<string> rawValues)
{
if (descriptor.Parser == null)
{
foreach (string rawValue in rawValues)
{
if (!ContainsNewLine(rawValue, descriptor))
{
AddParsedValue(info, rawValue);
}
}
}
else
{
foreach (string rawValue in rawValues)
{
if (!TryParseAndAddRawHeaderValue(descriptor, info, rawValue, true))
{
if (NetEventSource.Log.IsEnabled()) NetEventSource.Log.HeadersInvalidValue(descriptor.Name, rawValue);
}
}
}
}
private static void ParseSingleRawHeaderValue(HeaderDescriptor descriptor, HeaderStoreItemInfo info)
{
string? rawValue = info.RawValue as string;
Debug.Assert(rawValue != null, "RawValue must either be List<string> or string.");
if (descriptor.Parser == null)
{
if (!ContainsNewLine(rawValue, descriptor))
{
AddParsedValue(info, rawValue);
}
}
else
{
if (!TryParseAndAddRawHeaderValue(descriptor, info, rawValue, true))
{
if (NetEventSource.Log.IsEnabled()) NetEventSource.Log.HeadersInvalidValue(descriptor.Name, rawValue);
}
}
}
// See Add(name, string)
internal bool TryParseAndAddValue(HeaderDescriptor descriptor, string? value)
{
// We don't use GetOrCreateHeaderInfo() here, since that would create a new header in the store. If
// parsing the value were then to throw, we would have to remove the header from the store again. So just
// get a HeaderStoreItemInfo object and try to parse the value. If it works, we'll add the header.
HeaderStoreItemInfo info;
bool addToStore;
PrepareHeaderInfoForAdd(descriptor, out info, out addToStore);
bool result = TryParseAndAddRawHeaderValue(descriptor, info, value, false);
if (result && addToStore && (info.ParsedValue != null))
{
// If we get here, then the value could be parsed correctly. If we created a new HeaderStoreItemInfo, add
// it to the store if we added at least one value.
Debug.Assert(!ContainsKey(descriptor));
AddEntryToStore(new HeaderEntry(descriptor, info));
}
return result;
}
// See ParseAndAddValue
private static bool TryParseAndAddRawHeaderValue(HeaderDescriptor descriptor, HeaderStoreItemInfo info, string? value, bool addWhenInvalid)
{
Debug.Assert(info != null);
Debug.Assert(descriptor.Parser != null);
// Values are added as 'invalid' if we either can't parse the value OR if we already have a value
// and the current header doesn't support multiple values: e.g. trying to add a date/time value
// to the 'Date' header if we already have a date/time value will result in the second value being
// added to the 'invalid' header values.
if (!info.CanAddParsedValue(descriptor.Parser))
{
if (addWhenInvalid)
{
AddInvalidValue(info, value ?? string.Empty);
}
return false;
}
int index = 0;
if (descriptor.Parser.TryParseValue(value, info.ParsedValue, ref index, out object? parsedValue))
{
// The raw string only represented one value (which was successfully parsed). Add the value and return.
if ((value == null) || (index == value.Length))
{
if (parsedValue != null)
{
AddParsedValue(info, parsedValue);
}
return true;
}
Debug.Assert(index < value.Length, "Parser must return an index value within the string length.");
// If we successfully parsed a value, but there are more left to read, store the results in a temp
// list. Only when all values are parsed successfully write the list to the store.
List<object> parsedValues = new List<object>();
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
while (index < value.Length)
{
if (descriptor.Parser.TryParseValue(value, info.ParsedValue, ref index, out parsedValue))
{
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
}
else
{
if (!ContainsNewLine(value, descriptor) && addWhenInvalid)
{
AddInvalidValue(info, value);
}
return false;
}
}
// All values were parsed correctly. Copy results to the store.
foreach (object item in parsedValues)
{
AddParsedValue(info, item);
}
return true;
}
Debug.Assert(value != null);
if (!ContainsNewLine(value, descriptor) && addWhenInvalid)
{
AddInvalidValue(info, value ?? string.Empty);
}
return false;
}
private static void AddParsedValue(HeaderStoreItemInfo info, object value)
{
Debug.Assert(!(value is List<object>),
"Header value types must not derive from List<object> since this type is used internally to store " +
"lists of values. So we would not be able to distinguish between a single value and a list of values.");
AddValueToStoreValue<object>(value, ref info.ParsedValue);
}
private static void AddInvalidValue(HeaderStoreItemInfo info, string value)
{
AddValueToStoreValue<string>(value, ref info.InvalidValue);
}
private static void AddRawValue(HeaderStoreItemInfo info, string value)
{
AddValueToStoreValue<string>(value, ref info.RawValue);
}
private static void AddValueToStoreValue<T>(T value, ref object? currentStoreValue) where T : class
{
// If there is no value set yet, then add the current item as the value (we don't create a list
// if not required). If 'currentStoreValue' is already assigned, then make sure it is a
// List<T> and append 'value' to the list.
if (currentStoreValue == null)
{
currentStoreValue = value;
}
else
{
List<T>? storeValues = currentStoreValue as List<T>;
if (storeValues == null)
{
storeValues = new List<T>(2);
Debug.Assert(currentStoreValue is T);
storeValues.Add((T)currentStoreValue);
currentStoreValue = storeValues;
}
Debug.Assert(value is T);
storeValues.Add((T)value);
}
}
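// Illustrative sketch (not product code) of how a store slot evolves as values are added via
// AddValueToStoreValue above:
//   object? slot = null;
//   AddValueToStoreValue<string>("a", ref slot);  // slot is the string "a"
//   AddValueToStoreValue<string>("b", ref slot);  // slot is now a List<string> { "a", "b" }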
// Since most of the time we just have 1 value, we don't create a List<object> for one value, but we change
// the return type to 'object'. The caller has to deal with the return type (object vs. List<object>). This
// is to optimize the most common scenario where a header has only one value.
internal object? GetParsedValues(HeaderDescriptor descriptor)
{
if (!TryGetAndParseHeaderInfo(descriptor, out HeaderStoreItemInfo? info))
{
return null;
}
return info.ParsedValue;
}
internal virtual bool IsAllowedHeaderName(HeaderDescriptor descriptor) => true;
private void PrepareHeaderInfoForAdd(HeaderDescriptor descriptor, out HeaderStoreItemInfo info, out bool addToStore)
{
if (!IsAllowedHeaderName(descriptor))
{
throw new InvalidOperationException(SR.Format(SR.net_http_headers_not_allowed_header_name, descriptor.Name));
}
addToStore = false;
if (!TryGetAndParseHeaderInfo(descriptor, out info!))
{
info = new HeaderStoreItemInfo();
addToStore = true;
}
}
private void ParseAndAddValue(HeaderDescriptor descriptor, HeaderStoreItemInfo info, string? value)
{
Debug.Assert(info != null);
if (descriptor.Parser == null)
{
// If we don't have a parser for the header, we consider the value valid if it doesn't contain
// newline characters. We add the value as a "parsed value". Note that we allow empty values.
CheckContainsNewLine(value);
AddParsedValue(info, value ?? string.Empty);
return;
}
// If the header only supports 1 value, we can add the current value only if there is no
// value already set.
if (!info.CanAddParsedValue(descriptor.Parser))
{
throw new FormatException(SR.Format(System.Globalization.CultureInfo.InvariantCulture, SR.net_http_headers_single_value_header, descriptor.Name));
}
int index = 0;
object parsedValue = descriptor.Parser.ParseValue(value, info.ParsedValue, ref index);
// The raw string only represented one value (which was successfully parsed). Add the value and return.
// If value is null we still have to first call ParseValue() to allow the parser to decide whether null is
// a valid value. If it is (i.e. no exception thrown), we set the parsed value (if any) and return.
if ((value == null) || (index == value.Length))
{
// If the returned value is null, then it means the header accepts empty values. i.e. we don't throw
// but we don't add 'null' to the store either.
if (parsedValue != null)
{
AddParsedValue(info, parsedValue);
}
return;
}
Debug.Assert(index < value.Length, "Parser must return an index value within the string length.");
// If we successfully parsed a value, but there are more left to read, store the results in a temp
// list. Only when all values are parsed successfully write the list to the store.
List<object> parsedValues = new List<object>();
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
while (index < value.Length)
{
parsedValue = descriptor.Parser.ParseValue(value, info.ParsedValue, ref index);
if (parsedValue != null)
{
parsedValues.Add(parsedValue);
}
}
// All values were parsed correctly. Copy results to the store.
foreach (object item in parsedValues)
{
AddParsedValue(info, item);
}
}
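// Maps a header name to a HeaderDescriptor. Headers whose type is not allowed on this collection but
// is listed in _treatAsCustomHeaderTypes are downgraded to custom headers; any other disallowed name
// results in an InvalidOperationException.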
private HeaderDescriptor GetHeaderDescriptor(string name)
{
if (string.IsNullOrEmpty(name))
{
throw new ArgumentException(SR.net_http_argument_empty_string, nameof(name));
}
if (!HeaderDescriptor.TryGet(name, out HeaderDescriptor descriptor))
{
throw new FormatException(string.Format(SR.net_http_headers_invalid_header_name, name));
}
if ((descriptor.HeaderType & _allowedHeaderTypes) != 0)
{
return descriptor;
}
else if ((descriptor.HeaderType & _treatAsCustomHeaderTypes) != 0)
{
return descriptor.AsCustomHeader();
}
throw new InvalidOperationException(SR.Format(SR.net_http_headers_not_allowed_header_name, name));
}
internal bool TryGetHeaderDescriptor(string name, out HeaderDescriptor descriptor)
{
if (string.IsNullOrEmpty(name))
{
descriptor = default;
return false;
}
if (HeaderDescriptor.TryGet(name, out descriptor))
{
HttpHeaderType headerType = descriptor.HeaderType;
if ((headerType & _allowedHeaderTypes) != 0)
{
return true;
}
if ((headerType & _treatAsCustomHeaderTypes) != 0)
{
descriptor = descriptor.AsCustomHeader();
return true;
}
}
return false;
}
internal static void CheckContainsNewLine(string? value)
{
if (value == null)
{
return;
}
if (HttpRuleParser.ContainsNewLine(value))
{
throw new FormatException(SR.net_http_headers_no_newlines);
}
}
private static bool ContainsNewLine(string value, HeaderDescriptor descriptor)
{
if (HttpRuleParser.ContainsNewLine(value))
{
if (NetEventSource.Log.IsEnabled()) NetEventSource.Error(null, SR.Format(SR.net_http_log_headers_no_newlines, descriptor.Name, value));
return true;
}
return false;
}
internal static string[] GetStoreValuesAsStringArray(HeaderDescriptor descriptor, HeaderStoreItemInfo info)
{
GetStoreValuesAsStringOrStringArray(descriptor, info, out string? singleValue, out string[]? multiValue);
Debug.Assert(singleValue is not null ^ multiValue is not null);
return multiValue ?? new[] { singleValue! };
}
internal static void GetStoreValuesAsStringOrStringArray(HeaderDescriptor descriptor, object sourceValues, out string? singleValue, out string[]? multiValue)
{
HeaderStoreItemInfo? info = sourceValues as HeaderStoreItemInfo;
if (info is null)
{
Debug.Assert(sourceValues is string);
singleValue = (string)sourceValues;
multiValue = null;
return;
}
int length = GetValueCount(info);
Span<string?> values;
singleValue = null;
if (length == 1)
{
multiValue = null;
values = MemoryMarshal.CreateSpan(ref singleValue, 1);
}
else
{
Debug.Assert(length > 1, "The header should have been removed when it became empty");
values = multiValue = new string[length];
}
int currentIndex = 0;
ReadStoreValues<string?>(values, info.RawValue, null, ref currentIndex);
ReadStoreValues<object?>(values, info.ParsedValue, descriptor.Parser, ref currentIndex);
ReadStoreValues<string?>(values, info.InvalidValue, null, ref currentIndex);
Debug.Assert(currentIndex == length);
}
internal static int GetStoreValuesIntoStringArray(HeaderDescriptor descriptor, object sourceValues, [NotNull] ref string[]? values)
{
values ??= Array.Empty<string>();
HeaderStoreItemInfo? info = sourceValues as HeaderStoreItemInfo;
if (info is null)
{
Debug.Assert(sourceValues is string);
if (values.Length == 0)
{
values = new string[1];
}
values[0] = (string)sourceValues;
return 1;
}
int length = GetValueCount(info);
if (length > 0)
{
if (values.Length < length)
{
values = new string[length];
}
int currentIndex = 0;
ReadStoreValues<string?>(values, info.RawValue, null, ref currentIndex);
ReadStoreValues<object?>(values, info.ParsedValue, descriptor.Parser, ref currentIndex);
ReadStoreValues<string?>(values, info.InvalidValue, null, ref currentIndex);
Debug.Assert(currentIndex == length);
}
return length;
}
private static int GetValueCount(HeaderStoreItemInfo info)
{
Debug.Assert(info != null);
int valueCount = Count<string>(info.RawValue);
valueCount += Count<string>(info.InvalidValue);
valueCount += Count<object>(info.ParsedValue);
return valueCount;
static int Count<T>(object? valueStore) =>
valueStore is null ? 0 :
valueStore is List<T> list ? list.Count :
1;
}
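// Writes the string representation of the values in one store slot (raw, parsed, or invalid) into the
// destination span, using the supplied parser (if any) to format the values.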
private static void ReadStoreValues<T>(Span<string?> values, object? storeValue, HttpHeaderParser? parser, ref int currentIndex)
{
if (storeValue != null)
{
List<T>? storeValues = storeValue as List<T>;
if (storeValues == null)
{
values[currentIndex] = parser == null ? storeValue.ToString() : parser.ToString(storeValue);
currentIndex++;
}
else
{
foreach (object? item in storeValues)
{
Debug.Assert(item != null);
values[currentIndex] = parser == null ? item.ToString() : parser.ToString(item);
currentIndex++;
}
}
}
}
private bool AreEqual(object value, object? storeValue, IEqualityComparer? comparer)
{
Debug.Assert(value != null);
if (comparer != null)
{
return comparer.Equals(value, storeValue);
}
// We don't have a comparer, so use the Equals() method.
return value.Equals(storeValue);
}
internal sealed class HeaderStoreItemInfo
{
internal HeaderStoreItemInfo() { }
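// Each field below holds either a single value or a List<> of values (see AddValueToStoreValue):
// RawValue stores unparsed strings added via TryAddWithoutValidation, ParsedValue stores successfully
// parsed values, and InvalidValue stores strings that failed parsing but are still exposed.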
internal object? RawValue;
internal object? InvalidValue;
internal object? ParsedValue;
internal bool CanAddParsedValue(HttpHeaderParser parser)
{
Debug.Assert(parser != null, "There should be no reason to call CanAddValue if there is no parser for the current header.");
// If the header only supports one value, and we have already a value set, then we can't add
// another value. E.g. the 'Date' header only supports one value. We can't add multiple timestamps
// to 'Date'.
// So if this is a known header, ask the parser if it supports multiple values and check whether
// we already have a (valid or invalid) value.
// Note that we ignore RawValue on purpose: e.g. we are parsing 2 raw values for a header only
// supporting 1 value. When the first value gets parsed, CanAddParsedValue returns true and we add the
// parsed value to ParsedValue. When the second value is parsed, CanAddParsedValue returns false, because
// we already have a parsed value.
return parser.SupportsMultipleValues || ((InvalidValue == null) && (ParsedValue == null));
}
internal bool IsEmpty => (RawValue == null) && (InvalidValue == null) && (ParsedValue == null);
}
#region Low-level implementation details that work with _headerStore directly
// Used to store the CollectionsMarshal.GetValueRefOrAddDefault out parameter.
// This is a workaround for the Roslyn bug where we can't use a discard instead:
// https://github.com/dotnet/roslyn/issues/56587#issuecomment-934955526
private static bool s_dictionaryGetValueRefOrAddDefaultExistsDummy;
private const int InitialCapacity = 4;
internal const int ArrayThreshold = 64; // Above this threshold, header ordering will not be preserved
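// Storage strategy used by the members below: _headerStore is either null (no headers yet), a
// HeaderEntry[] holding up to ArrayThreshold entries in insertion order, or a
// Dictionary<HeaderDescriptor, object> once the array would have to grow beyond ArrayThreshold.
// Illustrative lifecycle (not product code):
//   null -> HeaderEntry[4] -> HeaderEntry[8] -> ... -> HeaderEntry[64] -> Dictionary<HeaderDescriptor, object>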
internal HeaderEntry[]? GetEntriesArray()
{
object? store = _headerStore;
if (store is null)
{
return null;
}
else if (store is HeaderEntry[] entries)
{
return entries;
}
else
{
return GetEntriesFromDictionary();
}
HeaderEntry[] GetEntriesFromDictionary()
{
var dictionary = (Dictionary<HeaderDescriptor, object>)_headerStore!;
var entries = new HeaderEntry[dictionary.Count];
int i = 0;
foreach (KeyValuePair<HeaderDescriptor, object> entry in dictionary)
{
entries[i++] = new HeaderEntry
{
Key = entry.Key,
Value = entry.Value
};
}
return entries;
}
}
internal ReadOnlySpan<HeaderEntry> GetEntries()
{
return new ReadOnlySpan<HeaderEntry>(GetEntriesArray(), 0, _count);
}
internal int Count => _count;
private bool EntriesAreLiveView => _headerStore is HeaderEntry[];
private ref object GetValueRefOrNullRef(HeaderDescriptor key)
{
ref object valueRef = ref Unsafe.NullRef<object>();
object? store = _headerStore;
if (store is HeaderEntry[] entries)
{
for (int i = 0; i < _count && i < entries.Length; i++)
{
if (key.Equals(entries[i].Key))
{
valueRef = ref entries[i].Value;
break;
}
}
}
else if (store is not null)
{
valueRef = ref CollectionsMarshal.GetValueRefOrNullRef(Unsafe.As<Dictionary<HeaderDescriptor, object>>(store), key);
}
return ref valueRef;
}
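// Returns a reference to the value slot for 'key', adding a default entry if the key is not present.
// For the array-backed store this may grow the array or, at ArrayThreshold, promote it to a dictionary.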
private ref object? GetValueRefOrAddDefault(HeaderDescriptor key)
{
object? store = _headerStore;
if (store is HeaderEntry[] entries)
{
for (int i = 0; i < _count && i < entries.Length; i++)
{
if (key.Equals(entries[i].Key))
{
return ref entries[i].Value!;
}
}
int count = _count;
_count++;
if ((uint)count < (uint)entries.Length)
{
entries[count].Key = key;
return ref entries[count].Value!;
}
return ref GrowEntriesAndAddDefault(key);
}
else if (store is null)
{
_count++;
entries = new HeaderEntry[InitialCapacity];
_headerStore = entries;
ref HeaderEntry firstEntry = ref MemoryMarshal.GetArrayDataReference(entries);
firstEntry.Key = key;
return ref firstEntry.Value!;
}
else
{
return ref DictionaryGetValueRefOrAddDefault(key);
}
ref object? GrowEntriesAndAddDefault(HeaderDescriptor key)
{
var entries = (HeaderEntry[])_headerStore!;
if (entries.Length == ArrayThreshold)
{
return ref ConvertToDictionaryAndAddDefault(key);
}
else
{
Array.Resize(ref entries, entries.Length << 1);
_headerStore = entries;
ref HeaderEntry firstNewEntry = ref entries[entries.Length >> 1];
firstNewEntry.Key = key;
return ref firstNewEntry.Value!;
}
}
ref object? ConvertToDictionaryAndAddDefault(HeaderDescriptor key)
{
var entries = (HeaderEntry[])_headerStore!;
var dictionary = new Dictionary<HeaderDescriptor, object>(ArrayThreshold);
_headerStore = dictionary;
foreach (HeaderEntry entry in entries)
{
dictionary.Add(entry.Key, entry.Value);
}
Debug.Assert(dictionary.Count == _count - 1);
return ref CollectionsMarshal.GetValueRefOrAddDefault(dictionary, key, out s_dictionaryGetValueRefOrAddDefaultExistsDummy);
}
ref object? DictionaryGetValueRefOrAddDefault(HeaderDescriptor key)
{
var dictionary = (Dictionary<HeaderDescriptor, object>)_headerStore!;
ref object? value = ref CollectionsMarshal.GetValueRefOrAddDefault(dictionary, key, out s_dictionaryGetValueRefOrAddDefaultExistsDummy);
if (value is null)
{
_count++;
}
return ref value;
}
}
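// Appends an entry that is known not to be in the store yet. The array fast path avoids the key
// lookup that GetValueRefOrAddDefault would otherwise perform.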
private void AddEntryToStore(HeaderEntry entry)
{
Debug.Assert(!ContainsKey(entry.Key));
if (_headerStore is HeaderEntry[] entries)
{
int count = _count;
if ((uint)count < (uint)entries.Length)
{
entries[count] = entry;
_count++;
return;
}
}
GetValueRefOrAddDefault(entry.Key) = entry.Value;
}
internal bool ContainsKey(HeaderDescriptor key)
{
return !Unsafe.IsNullRef(ref GetValueRefOrNullRef(key));
}
public void Clear()
{
if (_headerStore is HeaderEntry[] entries)
{
Array.Clear(entries, 0, _count);
}
else
{
_headerStore = null;
}
_count = 0;
}
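// Removes the entry for 'key'. For the array-backed store the remaining entries are shifted left so
// that insertion order is preserved.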
internal bool Remove(HeaderDescriptor key)
{
bool removed = false;
object? store = _headerStore;
if (store is HeaderEntry[] entries)
{
for (int i = 0; i < _count && i < entries.Length; i++)
{
if (key.Equals(entries[i].Key))
{
while (i + 1 < _count && (uint)(i + 1) < (uint)entries.Length)
{
entries[i] = entries[i + 1];
i++;
}
entries[i] = default;
removed = true;
break;
}
}
}
else if (store is not null)
{
removed = Unsafe.As<Dictionary<HeaderDescriptor, object>>(store).Remove(key);
}
if (removed)
{
_count--;
}
return removed;
}
#endregion // _headerStore implementation
}
}
| 1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Net.Http/tests/UnitTests/Headers/HttpHeadersTest.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Net.Http.Headers;
using System.Tests;
using Xunit;
namespace System.Net.Http.Tests
{
public class HttpHeadersTest
{
// Note: These are not real known headers, so they won't be returned if we call HeaderDescriptor.Get().
private static readonly HeaderDescriptor known1Header = (new KnownHeader("known1", HttpHeaderType.General, new MockHeaderParser())).Descriptor;
private static readonly HeaderDescriptor known2Header = (new KnownHeader("known2", HttpHeaderType.General, new MockHeaderParser())).Descriptor;
private static readonly HeaderDescriptor known3Header = (new KnownHeader("known3", HttpHeaderType.General, new MockHeaderParser())).Descriptor;
private static readonly HeaderDescriptor known4Header = (new KnownHeader("known3", HttpHeaderType.General, new CustomTypeHeaderParser())).Descriptor;
private static readonly HeaderDescriptor noComparerHeader = (new KnownHeader("noComparerHeader", HttpHeaderType.General, new NoComparerHeaderParser())).Descriptor;
private static readonly HeaderDescriptor customTypeHeader = (new KnownHeader("customTypeHeader", HttpHeaderType.General, new CustomTypeHeaderParser())).Descriptor;
private static readonly HeaderDescriptor customHeader;
static HttpHeadersTest()
{
HeaderDescriptor.TryGet("custom", out customHeader);
}
private const string customHeaderName = "custom-header";
private const string rawPrefix = "raw";
private const string parsedPrefix = "parsed";
private const string invalidHeaderValue = "invalid";
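// Convention relied on by the assertions below: the MockHeaderParser used by MockHeaders parses values
// starting with rawPrefix into the corresponding parsedPrefix value (e.g. "raw1" -> "parsed1") and
// fails to parse invalidHeaderValue.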
[Theory]
[InlineData(null)]
[InlineData("")]
public void TryAddWithoutValidation_UseEmptyHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.TryAddWithoutValidation(headerName, "value"));
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void TryAddWithoutValidation_UseInvalidHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.TryAddWithoutValidation(headerName, "value"));
}
[Fact]
public void TryAddWithoutValidation_AddSingleValue_ValueParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.First());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoSingleValues_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoValidValuesAsOneString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
// The parser gets called for each value in the raw string. I.e. if we have 1 raw string containing two
// values, the parser gets called twice.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoValuesOneValidOneInvalidAsOneString_RawStringAddedAsInvalid()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + invalidHeaderValue);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
// We expect the value to be returned without change since it couldn't be parsed in its entirety.
Assert.Equal(rawPrefix + "1," + invalidHeaderValue, headers.First().Value.ElementAt(0));
// The parser gets called twice, but the second time it returns false, because it tries to parse
// 'invalidHeaderValue'.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoValueStringAndThirdValue_AllValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "3");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddInvalidAndValidValueString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddEmptyValueString_HeaderWithNoValueAfterParsing()
{
MockHeaders headers = new MockHeaders();
// The parser returns 'true' to indicate that it could parse the value (empty values allowed) and a
// parsed value of 'null'. HttpHeaders will remove the header from the collection since the known header
// doesn't have a value.
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(0, headers.Count());
headers.Clear();
headers.TryAddWithoutValidation("custom", (string)null);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(string.Empty, headers.GetValues("custom").First());
}
[Fact]
public void TryAddWithoutValidation_AddValidAndInvalidValueString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
// If you compare this test with the previous one: Note that we reversed the order of adding the invalid
// string and the valid string. However, when enumerating header values the order is still the same as in
// the previous test.
// We don't keep track of the order if we have both invalid & valid values. This would add complexity
// and additional memory to store the information. Given how rare this scenario is, this is considered
// acceptable by design. Note that this scenario is only an issue if:
// - The header value has an invalid format (very rare for standard headers) AND
// - There are multiple header values (some valid, some invalid) AND
// - The order of the headers matters (e.g. Transfer-Encoding)
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
string expected = headers.Descriptor.Name + ": " + parsedPrefix + ", " + invalidHeaderValue + Environment.NewLine;
Assert.Equal(expected, headers.ToString());
}
[Fact]
public void TryAddWithoutValidation_AddNullValueForKnownHeader_ParserRejectsNullEmptyStringAdded()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, (string)null);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
// MockParser is called with an empty string and decides that it is OK to have empty values but they
// shouldn't be added to the list of header values. HttpHeaders will remove the header since it doesn't
// have values.
Assert.Equal(0, headers.Count());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddNullValueForUnknownHeader_EmptyStringAddedAsValue()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, (string)null);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
// 'null' values are internally stored as string.Empty. Since we added a custom header, there is no
// parser and the empty string is just added to the list of 'parsed values'.
Assert.Equal(string.Empty, headers.First().Value.First());
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddValueForUnknownHeader_ValueAddedToStore()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, "custom value");
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("custom value", headers.First().Value.First());
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddNullAndEmptyValuesToKnownHeader_HeaderRemovedFromCollection()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, (string)null);
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(0, headers.Count());
// TryAddWithoutValidation() adds 'null' as string.Empty to distinguish between an empty raw value and no raw
// value. When the parser is called later, the parser can decide whether empty strings are valid or not.
// In our case the MockParser returns 'success' with a parsed value of 'null' indicating that it is OK to
// have empty values, but they should be ignored.
Assert.Equal(2, headers.Parser.EmptyValueCount);
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddNullAndEmptyValuesToUnknownHeader_TwoEmptyStringsAddedAsValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, (string)null);
headers.TryAddWithoutValidation(customHeaderName, string.Empty);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
// TryAddWithoutValidation() adds 'null' as string.Empty to distinguish between an empty raw value and no raw
// value. For custom headers we just add what the user gives us. I.e. the result is a header with two empty
// values.
Assert.Equal(string.Empty, headers.First().Value.ElementAt(0));
Assert.Equal(string.Empty, headers.First().Value.ElementAt(1));
}
[Fact]
public void TryAddWithoutValidation_AddMultipleValueToSingleValueHeaders_FirstHeaderAddedOthersAreInvalid()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
// Note that the first value was parsed and added to the 'parsed values' list. The second value however
// was added to the 'invalid values' list since the header doesn't support multiple values.
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(rawPrefix + "2", headers.First().Value.ElementAt(1));
// The parser is only called once for the first value. HttpHeaders doesn't invoke the parser for
// additional values if the parser only supports one value.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddMultipleValueStringToSingleValueHeaders_MultipleValueStringAddedAsInvalid()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
// Since parsing the header value fails because it is composed of 2 values, the original string is added
// to the list of 'invalid values'. Therefore we only have 1 header value (the original string).
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(rawPrefix + "1," + rawPrefix + "2", headers.First().Value.First());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Theory]
[MemberData(nameof(HeaderValuesWithNewLines))]
public void TryAddWithoutValidation_AddValueContainingNewLine_Rejected(string headerValue)
{
var headers = new HttpRequestHeaders();
// This value is considered invalid (newline char followed by non-whitespace). However, since
// TryAddWithoutValidation() only causes the header value to be analyzed when it is actually accessed, no
// exception is thrown. Instead the value is discarded and a warning is logged.
headers.TryAddWithoutValidation("foo", headerValue);
Assert.Equal(1, headers.NonValidated.Count);
Assert.Equal(headerValue, headers.NonValidated["foo"].ToString());
Assert.False(headers.Contains("foo"));
Assert.Equal(0, headers.Count());
// Accessing the header forces parsing and the invalid value is removed
Assert.Equal(0, headers.NonValidated.Count);
headers.Clear();
headers.TryAddWithoutValidation("foo", new[] { "valid", headerValue });
Assert.Equal(1, headers.NonValidated.Count);
HeaderStringValues values = headers.NonValidated["foo"];
Assert.Equal(2, values.Count);
Assert.Equal(new[] { "valid", headerValue }, values);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("valid", headers.First().Value.First());
// Accessing the header forces parsing and the invalid value is removed
Assert.Equal(1, headers.NonValidated.Count);
values = headers.NonValidated["foo"];
Assert.Equal(1, values.Count);
Assert.Equal("valid", values.ToString());
}
[Fact]
public void TryAddWithoutValidation_MultipleAddInvalidValuesToNonExistingHeader_AddHeader()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, new string[] { invalidHeaderValue });
// The header does get added: TryAddWithoutValidation() stores the raw value, and once parsing fails
// it is kept as an 'invalid value'.
Assert.True(headers.Contains(headers.Descriptor));
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(0));
}
[Fact]
public void TryAddWithoutValidation_MultipleAddValidValueThenAddInvalidValuesToExistingHeader_AddValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, new string[] { rawPrefix + "2", invalidHeaderValue });
Assert.True(headers.Contains(headers.Descriptor));
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(2));
}
[Fact]
public void TryAddWithoutValidation_MultipleAddValidValueThenAddInvalidValuesToNonExistingHeader_AddHeader()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, new string[] { rawPrefix + "1", invalidHeaderValue });
Assert.True(headers.Contains(headers.Descriptor));
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
}
[Fact]
public void TryAddWithoutValidation_MultipleAddNullValueCollection_Throws()
{
MockHeaders headers = new MockHeaders();
string[] values = null;
Assert.Throws<ArgumentNullException>(() => { headers.TryAddWithoutValidation(headers.Descriptor, values); });
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Add_SingleUseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.Add(headerName, "value"); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void Add_SingleUseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headerName, "value"); });
}
[Fact]
public void Add_SingleUseStoreWithNoParserStore_AllHeadersConsideredCustom()
{
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.Add("custom", "value");
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("value", headers.First().Value.First());
}
[Fact]
public void Add_SingleAddValidValue_ValueParsedCorrectly()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
// Add() should trigger parsing.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddEmptyValueMultipleTimes_EmptyHeaderAdded()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, (string)null);
headers.Add(headers.Descriptor, string.Empty);
headers.Add(headers.Descriptor, string.Empty);
// Add() should trigger parsing.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(0, headers.Count());
}
[Fact]
public void Add_SingleAddInvalidValueToNonExistingHeader_ThrowAndDontAddHeader()
{
// Since Add() immediately parses the value, it will throw an exception if the value is invalid.
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, invalidHeaderValue); });
// Make sure the header did not get added to the store.
Assert.False(headers.Contains(headers.Descriptor),
"No header expected to be added since header value was invalid.");
}
[Fact]
public void Add_SingleAddValidValueThenAddInvalidValue_ThrowAndHeaderContainsValidValue()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, invalidHeaderValue); });
// Make sure the header did not get removed due to the failed add.
Assert.True(headers.Contains(headers.Descriptor), "Header was removed even if there is a valid header value.");
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
}
[Fact]
public void Add_MultipleAddInvalidValuesToNonExistingHeader_ThrowAndDontAddHeader()
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, new string[] { invalidHeaderValue }); });
// Make sure the header did not get added since we just tried to add an invalid value.
Assert.False(headers.Contains(headers.Descriptor), "Header was added even if we just added an invalid value.");
}
[Fact]
public void Add_MultipleAddValidValueThenAddInvalidValuesToExistingHeader_ThrowAndDontAddHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, new string[] { rawPrefix + "2", invalidHeaderValue }); });
// Make sure the header did not get removed due to the failed add. Note that the first value in the array
// is valid, so it gets added. I.e. we have 2 values.
Assert.True(headers.Contains(headers.Descriptor), "Header was removed even if there is a valid header value.");
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
}
[Fact]
public void Add_MultipleAddValidValueThenAddInvalidValuesToNonExistingHeader_ThrowAndDontAddHeader()
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, new string[] { rawPrefix + "1", invalidHeaderValue }); });
// Make sure the header got added due to the valid add. Note that the first value in the array
// is valid, so it gets added.
Assert.True(headers.Contains(headers.Descriptor), "Header was not added even though we added 1 valid value.");
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
}
[Fact]
public void Add_SingleAddThreeValidValues_ValuesParsedCorrectly()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
headers.Add(headers.Descriptor, rawPrefix + "3");
// Add() should trigger parsing.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddTwoValidValuesToHeaderWithSingleValue_Throw()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
// Can only add headers once.
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, rawPrefix + "2"); });
// Verify that the first header value is still there.
Assert.Equal(1, headers.First().Value.Count());
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationForValidValueThenAdd_TwoParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Add() should trigger parsing. TryAddWithoutValidation() doesn't trigger parsing, but Add() triggers
// parsing of raw header values (TryParseValue() is called)
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationForInvalidValueThenAdd_TwoParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
headers.Add(headers.Descriptor, rawPrefix + "1");
// Add() should trigger parsing. TryAddWithoutValidation() doesn't trigger parsing, but Add() triggers
// parsing of raw header values (TryParseValue() is called)
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationForEmptyValueThenAdd_OneParsedValueAddedEmptyIgnored()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
headers.Add(headers.Descriptor, rawPrefix + "1");
// Add() should trigger parsing. TryAddWithoutValidation() doesn't trigger parsing, but Add() triggers
// parsing of raw header values (TryParseValue() is called)
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstAddThenTryAddWithoutValidation_TwoParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// Add() should trigger parsing. Since TryAddWithoutValidation() is called afterwards the second value is
// not parsed yet.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddThenTryAddWithoutValidationThenAdd_ThreeParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.Add(headers.Descriptor, rawPrefix + "3");
// The second Add() triggers also parsing of the value added by TryAddWithoutValidation()
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationThenAddToSingleValueHeader_AddThrows()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, rawPrefix + "2"); });
}
[Fact]
public void Add_SingleFirstAddThenTryAddWithoutValidationToSingleValueHeader_BothParsedAndInvalidValue()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Add() succeeds since we don't have a value added yet. TryAddWithoutValidation() also succeeds, however
// the value is added to the 'invalid values' list when retrieved.
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(rawPrefix + "2", headers.First().Value.ElementAt(1));
// Note that TryParseValue() is not called because HttpHeaders sees that there is already a value
// so it adds the raw value to 'invalid values'.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_MultipleAddThreeValidValuesWithOneCall_ValuesParsedCorrectly()
{
MockHeaders headers = new MockHeaders();
string[] values = new string[] { rawPrefix + "1", rawPrefix + "2", rawPrefix + "3" };
headers.Add(headers.Descriptor, values);
// Add() should trigger parsing.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_MultipleAddThreeValidValuesAsOneString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2," + rawPrefix + "3");
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
}
[Fact]
public void Add_MultipleAddNullValueCollection_Throw()
{
MockHeaders headers = new MockHeaders();
string[] values = null;
Assert.Throws<ArgumentNullException>(() => { headers.Add(headers.Descriptor, values); });
}
[Fact]
public void Add_SingleAddCustomHeaderWithNullValue_HeaderIsAddedWithEmptyStringValue()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeaderName, (string)null);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(string.Empty, headers.First().Value.ElementAt(0));
// We're using a custom header. No parsing should be triggered.
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddHeadersWithDifferentCasing_ConsideredTheSameHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom-header", "value1");
headers.Add("Custom-Header", "value2");
headers.Add("CUSTOM-HEADER", "value2");
Assert.Equal(3, headers.GetValues("custom-header").Count());
Assert.Equal(3, headers.GetValues("Custom-Header").Count());
Assert.Equal(3, headers.GetValues("CUSTOM-HEADER").Count());
Assert.Equal(3, headers.GetValues("CuStOm-HeAdEr").Count());
}
[Theory]
[MemberData(nameof(HeaderValuesWithNewLines))]
public void Add_AddValueContainingNewLine_Rejected(string headerValue)
{
var headers = new HttpRequestHeaders();
Assert.Throws<FormatException>(() => headers.Add("foo", headerValue));
Assert.Equal(0, headers.Count());
Assert.Equal(0, headers.NonValidated.Count);
headers.Clear();
Assert.Throws<FormatException>(() => headers.Add("foo", new[] { "valid", headerValue }));
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("valid", headers.First().Value.First());
Assert.Equal(1, headers.NonValidated.Count);
Assert.Equal("valid", headers.NonValidated["foo"].ToString());
}
[Fact]
public void RemoveParsedValue_AddValueAndRemoveIt_NoHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
// Remove the parsed value (note the original string 'raw1' was "parsed" to 'parsed1')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(0, headers.Count());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Remove the value again: It shouldn't be found in the store.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
}
[Fact]
public void RemoveParsedValue_AddInvalidValueAndRemoveValidValue_InvalidValueRemains()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
// Remove a valid value which is not in the store.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix));
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Nothing was removed: the invalid value is still in the store, so the header remains.
Assert.Equal(1, headers.Count());
Assert.Equal(invalidHeaderValue, headers.GetValues(headers.Descriptor).First());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Remove the value again: It shouldn't be found in the store.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
}
[Fact]
public void RemoveParsedValue_ParserWithNoEqualityComparer_CaseSensitiveComparison()
{
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(noComparerHeader, "lowercasevalue");
// Since we don't provide a comparer, the default string.Equals() is called which is case-sensitive. So
// the following call should return false.
Assert.False(headers.RemoveParsedValue(noComparerHeader, "LOWERCASEVALUE"));
// Now we try to remove the value using the correct casing. This should work.
Assert.True(headers.RemoveParsedValue(noComparerHeader, "lowercasevalue"));
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(0, headers.Count());
}
[Fact]
public void RemoveParsedValue_AddTwoValuesAndRemoveThem_NoHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Remove the parsed value (note the original string 'raw1' was "parsed" to 'parsed1')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "2"));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(0, headers.Count());
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void RemoveParsedValue_AddTwoValuesAndRemoveFirstOne_SecondValueRemains()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Remove the parsed value (note the original string 'raw1' was "parsed" to 'parsed1')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Only one of the two values was removed, so the header remains with the other value.
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(0));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void RemoveParsedValue_AddTwoValuesAndRemoveSecondOne_FirstValueRemains()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Remove the parsed value (note the original string 'raw2' was "parsed" to 'parsed2')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "2"));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Only one of the two values was removed, so the header remains with the other value.
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void RemoveParsedValue_RemoveFromNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
// The 'custom' header was never added to the store, so false is returned.
Assert.False(headers.RemoveParsedValue(customHeader, "doesntexist"));
}
[Fact]
public void RemoveParsedValue_RemoveFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
// If we never add a header value, the whole header (and also the header store) doesn't exist.
// Make sure we considered this case.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, "doesntexist"));
}
[Fact]
public void RemoveParsedValue_AddOneValueToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
Assert.True(headers.RemoveParsedValue(headers.Descriptor, "VALUE"));
Assert.False(headers.Contains(headers.Descriptor), "Header should be removed after removing value.");
Assert.Equal(1, headers.Parser.MockComparer.EqualsCount);
}
[Fact]
public void RemoveParsedValue_AddTwoValuesToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "differentvalue");
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
// Note that since we added 2 values a different code path than in the previous test is used. In this
// case we have stored the values as List<string> internally.
Assert.True(headers.RemoveParsedValue(headers.Descriptor, "VALUE"));
Assert.Equal(1, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(2, headers.Parser.MockComparer.EqualsCount);
}
[Fact]
public void RemoveParsedValue_FirstAddNewlineCharsValueThenCallRemoveParsedValue_HeaderRemoved()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.RemoveParsedValue(headers.Descriptor, "");
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for 'knownHeader'.");
}
[Fact]
public void RemoveParsedValue_FirstAddNewlineCharsValueThenAddValidValueThenCallAddParsedValue_HeaderRemoved()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1");
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for 'knownHeader'.");
}
[Fact]
public void Clear_AddMultipleHeadersAndThenClear_NoHeadersInCollection()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("custom3", "customValue3");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// We added 4 different headers
Assert.Equal(4, headers.Count());
headers.Clear();
Assert.Equal(0, headers.Count());
// The call to Count() triggers a TryParseValue for the TryAddWithoutValidation() value. Clear() should
// not cause any additional parsing operations.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Remove_UseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.Remove(headerName); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void Remove_UseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Remove(headerName); });
}
[Fact]
public void Remove_AddMultipleHeadersAndDeleteFirstAndLast_FirstAndLastHeaderRemoved()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("lastheader", "customValue3");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// We added 4 different headers
Assert.Equal(4, headers.Count());
// Remove first header
Assert.True(headers.Remove(headers.Descriptor));
Assert.Equal(3, headers.Count());
// Remove last header
Assert.True(headers.Remove("lastheader"));
Assert.Equal(2, headers.Count());
// The call to Count() triggers a TryParseValue for the TryAddWithoutValidation() value. Remove() should
// not cause any additional parsing operations.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Remove_RemoveHeaderFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
// Remove header from uninitialized store (store collection is null)
Assert.False(headers.Remove(headers.Descriptor));
Assert.Equal(0, headers.Count());
}
[Fact]
public void Remove_RemoveNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
Assert.Equal(1, headers.Count());
// Remove a non-existing header (the store contains one other header).
Assert.False(headers.Remove("doesntexist"));
Assert.Equal(1, headers.Count());
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void TryGetValues_UseEmptyHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
IEnumerable<string> values = null;
Assert.False(headers.TryGetValues(headerName, out values));
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void TryGetValues_UseInvalidHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
IEnumerable<string> values = null;
Assert.False(headers.TryGetValues(headerName, out values));
}
[Fact]
public void TryGetValues_GetValuesFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
IEnumerable<string> values = null;
// Get header values from uninitialized store (store collection is null)
Assert.False(headers.TryGetValues("doesntexist", out values));
Assert.Equal(0, headers.Count());
}
[Fact]
public void TryGetValues_GetValuesForNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
IEnumerable<string> values = null;
// Get header values for a non-existing header (the store contains one other header).
Assert.False(headers.TryGetValues("doesntexist", out values));
Assert.Equal(1, headers.Count());
}
[Fact]
public void TryGetValues_GetValuesForExistingHeader_ReturnsTrueAndListOfValues()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
IEnumerable<string> values = null;
Assert.True(headers.TryGetValues(headers.Descriptor, out values));
Assert.NotNull(values);
// TryGetValues() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(2, values.Count());
// Check returned values
Assert.Equal(parsedPrefix + "1", values.ElementAt(0));
Assert.Equal(parsedPrefix + "2", values.ElementAt(1));
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void GetValues_UseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.GetValues(headerName); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void GetValues_UseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.GetValues(headerName); });
}
[Fact]
public void GetValues_GetValuesFromUninitializedHeaderStore_Throw()
{
MockHeaders headers = new MockHeaders();
// Get header values from uninitialized store (store collection is null). This will throw.
Assert.Throws<InvalidOperationException>(() => { headers.GetValues("doesntexist"); });
}
[Fact]
public void GetValues_GetValuesForNonExistingHeader_Throw()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
// Get header values for non-existing header (but other headers exist in the store).
Assert.Throws<InvalidOperationException>(() => { headers.GetValues("doesntexist"); });
}
[Fact]
public void GetValues_GetValuesForExistingHeader_ReturnsTrueAndListOfValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation("custom", rawPrefix + "0"); // this must not influence the result.
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
IEnumerable<string> values = headers.GetValues(headers.Descriptor);
Assert.NotNull(values);
// GetValues() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(2, values.Count());
// Check returned values
Assert.Equal(parsedPrefix + "1", values.ElementAt(0));
Assert.Equal(parsedPrefix + "2", values.ElementAt(1));
}
[Fact]
public void GetValues_HeadersWithEmptyValues_ReturnsEmptyArray()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, (string)null);
headers.Add(headers.Descriptor, string.Empty);
// In the known header case, the MockParser accepts empty values but tells the store to not add the value.
// Since no value is added for 'knownHeader', HttpHeaders removes the header from the store. This is only
// done for known headers. Custom headers are allowed to have empty/null values as shown by
// 'valuesForCustomHeader' below.
Assert.False(headers.Contains(headers.Descriptor));
// In the custom header case, we add whatever the user adds (besides that we add string.Empty if the
// user adds null). So here we do have 1 value: string.Empty.
IEnumerable<string> valuesForCustomHeader = headers.GetValues(customHeaderName);
Assert.NotNull(valuesForCustomHeader);
Assert.Equal(1, valuesForCustomHeader.Count());
Assert.Equal(string.Empty, valuesForCustomHeader.First());
}
[Fact]
public void GetParsedValues_GetValuesFromUninitializedHeaderStore_ReturnsNull()
{
MockHeaders headers = new MockHeaders();
// Get header values from uninitialized store (store collection is null).
object storeValue = headers.GetParsedValues(customHeader);
Assert.Null(storeValue);
}
[Fact]
public void GetParsedValues_GetValuesForNonExistingHeader_ReturnsNull()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
// Get header values for non-existing header (but other headers exist in the store).
object storeValue = headers.GetParsedValues(customHeader);
Assert.Null(storeValue);
}
[Fact]
public void GetParsedValues_GetSingleValueForExistingHeader_ReturnsAddedValue()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeader.Name, "customValue1");
// Get the parsed value for the existing header.
object storeValue = headers.GetParsedValues(customHeader);
Assert.NotNull(storeValue);
// If we only have one value, then GetValues() should return just the value and not wrap it in a List<T>.
Assert.Equal("customValue1", storeValue);
}
[Fact]
public void GetParsedValues_HeaderWithEmptyValues_ReturnsEmpty()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, string.Empty);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.Null(storeValue);
}
[Fact]
public void GetParsedValues_GetMultipleValuesForExistingHeader_ReturnsListOfValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation("custom", rawPrefix + "0"); // this must not influence the result.
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.NotNull(storeValue);
// GetValues<T>() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Since we added 2 values to header 'knownHeader', we expect GetValues() to return a List<T> with
// two values.
List<object> storeValues = storeValue as List<object>;
Assert.NotNull(storeValues);
Assert.Equal(2, storeValues.Count);
Assert.Equal(parsedPrefix + "1", storeValues[0]);
Assert.Equal(parsedPrefix + "2", storeValues[1]);
}
[Fact]
public void GetParsedValues_GetValuesForExistingHeaderWithInvalidValues_ReturnsOnlyParsedValues()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
// Here we add an invalid value. GetValues<T> only returns parsable values. So this value should get
// parsed, however it will be added to the 'invalid values' list and thus is not part of the collection
// returned by the enumerator.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.NotNull(storeValue);
// GetValues<T>() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Since we added only one valid value to 'knownHeader', we expect GetValues() to return just that value.
Assert.Equal(parsedPrefix, storeValue);
}
[Fact]
public void GetParsedValues_GetValuesForExistingHeaderWithOnlyInvalidValues_ReturnsEmptyEnumerator()
{
MockHeaders headers = new MockHeaders();
// Here we add an invalid value. GetValues<T> only returns parsable values. So this value should get
// parsed, however it will be added to the 'invalid values' list and thus is not part of the collection
// returned by the enumerator.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.Null(storeValue);
// GetValues<T>() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void GetParsedValues_AddInvalidValueToHeader_HeaderGetsRemovedAndNullReturned()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.Null(storeValue);
Assert.False(headers.Contains(headers.Descriptor));
}
[Fact]
public void GetParsedValues_GetParsedValuesForKnownHeaderWithNewlineChars_ReturnsNull()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Null(headers.GetParsedValues(headers.Descriptor));
Assert.Equal(0, headers.Count());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void NonValidated_Default_Empty()
{
HttpHeadersNonValidated v = default;
Assert.Equal(0, v.Count);
Assert.Empty(v);
Assert.False(v.TryGetValues("Host", out HeaderStringValues values));
Assert.Empty(values);
}
[Fact]
public void NonValidated_SetValidAndInvalidHeaderValues_AllHeaderValuesReturned()
{
MockHeaderParser parser = new MockHeaderParser("---");
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, "value2,value3");
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
string expectedValue = "value2,value3---" + invalidHeaderValue + "---" + parsedPrefix + "1";
Assert.Equal(1, headers.NonValidated.Count);
int iterations = 0;
foreach (KeyValuePair<string, HeaderStringValues> header in headers.NonValidated)
{
// Note that raw values don't get parsed but just added to the result.
iterations++;
Assert.Equal(headers.Descriptor.Name, header.Key);
Assert.Equal(3, header.Value.Count);
Assert.Equal(expectedValue, header.Value.ToString());
}
Assert.Equal(1, iterations);
}
[Fact]
public void NonValidated_SetMultipleHeaders_AllHeaderValuesReturned()
{
MockHeaderParser parser = new MockHeaderParser(true);
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add("header2", "value2");
headers.Add("header3", (string)null);
headers.Add("header4", "value41");
headers.Add("header4", "value42");
string[] expectedHeaderNames = { headers.Descriptor.Name, "header2", "header3", "header4" };
string[] expectedHeaderValues = { parsedPrefix + "1", "value2", "", "value41, value42" };
int i = 0;
foreach (KeyValuePair<string, HeaderStringValues> header in headers.NonValidated)
{
Assert.NotEqual(expectedHeaderNames.Length, i);
Assert.Equal(expectedHeaderNames[i], header.Key);
Assert.Equal(expectedHeaderValues[i], header.Value.ToString());
i++;
}
}
[Fact]
public void NonValidated_SetMultipleValuesOnSingleValueHeader_AllHeaderValuesReturned()
{
MockHeaderParser parser = new MockHeaderParser(false);
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, "value1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
foreach (KeyValuePair<string, HeaderStringValues> header in headers.NonValidated)
{
Assert.Equal(headers.Descriptor.Name, header.Key);
// Note that the added rawPrefix did not get parsed
Assert.Equal("value1, " + rawPrefix, header.Value.ToString());
}
}
[Fact]
public void NonValidated_ValidAndInvalidValues_DictionaryMembersWork()
{
var headers = new HttpResponseHeaders();
IReadOnlyDictionary<string, HeaderStringValues> nonValidated = headers.NonValidated;
Assert.True(headers.TryAddWithoutValidation("Location", "http:/invalidLocation"));
Assert.True(headers.TryAddWithoutValidation("Location", "http:/anotherLocation"));
Assert.True(headers.TryAddWithoutValidation("Date", "not a date"));
Assert.Equal(2, nonValidated.Count);
Assert.True(nonValidated.ContainsKey("Location"));
Assert.True(nonValidated.ContainsKey("Date"));
Assert.False(nonValidated.ContainsKey("Age"));
Assert.False(nonValidated.TryGetValue("Age", out _));
Assert.Throws<KeyNotFoundException>(() => nonValidated["Age"]);
Assert.True(nonValidated.TryGetValue("Location", out HeaderStringValues locations));
Assert.Equal(2, locations.Count);
Assert.Equal(new[] { "http:/invalidLocation", "http:/anotherLocation" }, locations.ToArray());
Assert.Equal("http:/invalidLocation, http:/anotherLocation", locations.ToString());
Assert.True(nonValidated.TryGetValue("Date", out HeaderStringValues dates));
Assert.Equal(1, dates.Count);
Assert.Equal(new[] { "not a date" }, dates.ToArray());
Assert.Equal("not a date", dates.ToString());
dates = nonValidated["Date"];
Assert.Equal(1, dates.Count);
Assert.Equal(new[] { "not a date" }, dates.ToArray());
Assert.Equal("not a date", dates.ToString());
Assert.Equal(new HashSet<string> { "Location", "Date" }, nonValidated.Keys.ToHashSet());
}
[Fact]
public void NonValidated_ValidInvalidAndRaw_AllReturned()
{
var headers = new HttpResponseHeaders();
IReadOnlyDictionary<string, HeaderStringValues> nonValidated = headers.NonValidated;
// Parsed value
headers.Date = new DateTimeOffset(1, 2, 3, 4, 5, 6, TimeSpan.Zero);
// Invalid value
headers.TryAddWithoutValidation("Date", "not a date");
foreach (KeyValuePair<string, IEnumerable<string>> _ in headers) { }
// Raw value
headers.TryAddWithoutValidation("Date", "another not a date");
// All three show up
Assert.Equal(1, nonValidated.Count);
Assert.Equal(3, nonValidated["Date"].Count);
using (new ThreadCultureChange(new CultureInfo("en-US")))
{
Assert.Equal(new HashSet<string> { "not a date", "another not a date", "Sat, 03 Feb 0001 04:05:06 GMT" }, nonValidated["Date"].ToHashSet());
}
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Contains_UseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.Contains(headerName); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void Contains_UseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Contains(headerName); });
}
[Fact]
public void Contains_CallContainsFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.Contains("doesntexist"));
}
[Fact]
public void Contains_CallContainsForNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
Assert.False(headers.Contains("doesntexist"));
}
[Fact]
public void Contains_CallContainsForEmptyHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, string.Empty);
Assert.False(headers.Contains(headers.Descriptor));
}
[Fact]
public void Contains_CallContainsForExistingHeader_ReturnsTrue()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("custom3", "customValue3");
headers.Add("custom4", "customValue4");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
// Nothing got parsed so far since we just added custom headers and for the known header we called
// TryAddWithoutValidation().
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.True(headers.Contains(headers.Descriptor));
// Contains() should trigger parsing of values added with TryAddWithoutValidation(): If the value was invalid,
// i.e. contains newline chars, then the header will be removed from the collection.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Contains_AddValuesWithNewlineChars_HeadersGetRemovedWhenCallingContains()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
headers.TryAddWithoutValidation("custom", "invalid\r\nvalue");
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for 'knownHeader'.");
Assert.False(headers.Contains("custom"), "Store should not have an entry for 'custom'.");
}
[Fact]
public void GetEnumerator_GetEnumeratorFromUninitializedHeaderStore_ReturnsEmptyEnumerator()
{
MockHeaders headers = new MockHeaders();
IEnumerator<KeyValuePair<string, IEnumerable<string>>> enumerator = headers.GetEnumerator();
Assert.False(enumerator.MoveNext());
}
[Fact]
public void GetEnumerator_FirstHeaderWithOneValueSecondHeaderWithTwoValues_EnumeratorReturnsTwoHeaders()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeaderName, "custom0");
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// The value added with TryAddWithoutValidation() wasn't parsed yet.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
IEnumerator<KeyValuePair<string, IEnumerable<string>>> enumerator = headers.GetEnumerator();
// Getting the enumerator doesn't trigger parsing.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.True(enumerator.MoveNext());
Assert.Equal(customHeaderName, enumerator.Current.Key);
Assert.Equal(1, enumerator.Current.Value.Count());
Assert.Equal("custom0", enumerator.Current.Value.ElementAt(0));
// Starting to use the enumerator will trigger parsing of raw values. The first header is not a known
// header, so there shouldn't be any parsing.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.True(enumerator.MoveNext());
Assert.Equal(headers.Descriptor.Name, enumerator.Current.Key);
Assert.Equal(2, enumerator.Current.Value.Count());
Assert.Equal(parsedPrefix + "1", enumerator.Current.Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", enumerator.Current.Value.ElementAt(1));
// The second header is a known header, so parsing raw values should get executed.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.False(enumerator.MoveNext(), "Only 2 values expected, but enumerator returns a third one.");
}
[Fact]
public void GetEnumerator_FirstCustomHeaderWithEmptyValueSecondKnownHeaderWithEmptyValue_EnumeratorReturnsOneHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeaderName, string.Empty);
headers.Add(headers.Descriptor, string.Empty);
IEnumerator<KeyValuePair<string, IEnumerable<string>>> enumerator = headers.GetEnumerator();
Assert.True(enumerator.MoveNext());
Assert.Equal(customHeaderName, enumerator.Current.Key);
Assert.Equal(1, enumerator.Current.Value.Count());
Assert.Equal(string.Empty, enumerator.Current.Value.ElementAt(0));
Assert.False(enumerator.MoveNext(), "Only the (empty) custom value should be returned.");
}
[Fact]
public void GetEnumerator_UseExplicitInterfaceImplementation_EnumeratorReturnsNoOfHeaders()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("custom3", "customValue3");
headers.Add("custom4", "customValue4");
System.Collections.IEnumerable headersAsIEnumerable = headers;
IEnumerator enumerator = headersAsIEnumerable.GetEnumerator();
KeyValuePair<string, IEnumerable<string>> currentValue;
for (int i = 1; i <= 4; i++)
{
Assert.True(enumerator.MoveNext());
currentValue = (KeyValuePair<string, IEnumerable<string>>)enumerator.Current;
Assert.Equal("custom" + i, currentValue.Key);
Assert.Equal(1, currentValue.Value.Count());
}
Assert.False(enumerator.MoveNext(), "Only 2 values expected, but enumerator returns a third one.");
}
[Fact]
public void GetEnumerator_InvalidValueBetweenValidHeaders_EnumeratorReturnsAllValidValuesAndRemovesInvalidValue()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation("foo", "fooValue");
headers.TryAddWithoutValidation("invalid", "invalid\nvalue");
headers.TryAddWithoutValidation("bar", "barValue");
Assert.Equal(3, headers.Count);
IDictionary<string, IEnumerable<string>> dict = headers.ToDictionary(pair => pair.Key, pair => pair.Value);
Assert.Equal("fooValue", Assert.Single(Assert.Contains("foo", dict)));
Assert.Equal("barValue", Assert.Single(Assert.Contains("bar", dict)));
Assert.Equal(2, headers.Count);
Assert.False(headers.NonValidated.Contains("invalid"));
}
[Fact]
public void AddParsedValue_AddSingleValueToNonExistingHeader_HeaderGetsCreatedAndValueAdded()
{
Uri headerValue = new Uri("http://example.org/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(customTypeHeader, headerValue);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(headerValue.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void AddParsedValue_AddValueTypeValueToNonExistingHeader_HeaderGetsCreatedAndBoxedValueAdded()
{
int headerValue = 5;
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(customTypeHeader, headerValue);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(headerValue.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void AddParsedValue_AddTwoValuesToNonExistingHeader_HeaderGetsCreatedAndValuesAdded()
{
Uri headerValue1 = new Uri("http://example.org/1/");
Uri headerValue2 = new Uri("http://example.org/2/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(customTypeHeader, headerValue1);
// Adding a second value will cause a List<T> to be created in order to store values. If we just add
// one value, no List<T> is created, but the header is just added as store value.
headers.AddParsedValue(customTypeHeader, headerValue2);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(2, headers.GetValues(customTypeHeader).Count());
Assert.Equal(headerValue1.ToString(), headers.First().Value.ElementAt(0));
Assert.Equal(headerValue2.ToString(), headers.First().Value.ElementAt(1));
}
[Fact]
public void AddParsedValue_UseDifferentAddMethods_AllValuesAddedCorrectly()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
headers.AddParsedValue(headers.Descriptor, parsedPrefix + "3");
// Adding a parsed value will trigger all raw values to be parsed.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(3, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
}
[Fact]
public void AddParsedValue_FirstAddNewlineCharsValueThenCallAddParsedValue_ParsedValueAdded()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.AddParsedValue(headers.Descriptor, parsedPrefix + "1");
Assert.True(headers.Contains(headers.Descriptor), "Store should have an entry for 'knownHeader'.");
Assert.Equal(1, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "1", headers.GetValues(headers.Descriptor).First());
}
[Fact]
public void AddParsedValue_FirstAddNewlineCharsValueThenAddValidValueThenCallAddParsedValue_ParsedValueAdded()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "0");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.AddParsedValue(headers.Descriptor, parsedPrefix + "1");
Assert.True(headers.Contains(headers.Descriptor), "Store should have an entry for 'knownHeader'.");
Assert.Equal(2, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "0", headers.GetValues(headers.Descriptor).ElementAt(0));
Assert.Equal(parsedPrefix + "1", headers.GetValues(headers.Descriptor).ElementAt(1));
}
[Fact]
public void SetParsedValue_AddSingleValueToNonExistingHeader_HeaderGetsCreatedAndValueAdded()
{
Uri headerValue = new Uri("http://example.org/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.SetParsedValue(customTypeHeader, headerValue);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(headerValue.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void SetParsedValue_SetTwoValuesToNonExistingHeader_HeaderGetsCreatedAndLastValueAdded()
{
Uri headerValue1 = new Uri("http://example.org/1/");
Uri headerValue2 = new Uri("http://example.org/2/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.SetParsedValue(customTypeHeader, headerValue1);
// The following line will remove the previously added values and replace them with the provided value.
headers.SetParsedValue(customTypeHeader, headerValue2);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(1, headers.GetValues(customTypeHeader).Count());
// The second value replaces the first value.
Assert.Equal(headerValue2.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void SetParsedValue_SetValueAfterAddingMultipleValues_SetValueReplacesOtherValues()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
headers.SetParsedValue(headers.Descriptor, parsedPrefix + "3");
// Adding a parsed value will trigger all raw values to be parsed.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(0));
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.ContainsParsedValue(customHeader, "custom1"));
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.False(headers.ContainsParsedValue(customHeader, "custom1"));
// ContainsParsedValue() must not trigger raw value parsing for headers other than the requested one.
// In this case we expect ContainsParsedValue(customHeader) not to trigger raw value parsing for
// 'headers.Descriptor'.
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForNonExistingHeaderValue_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "value1");
headers.AddParsedValue(headers.Descriptor, "value2");
// After adding two values to header 'knownHeader' we ask for a non-existing value.
Assert.False(headers.ContainsParsedValue(headers.Descriptor, "doesntexist"));
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForExistingHeaderButNonAvailableValue_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.False(headers.ContainsParsedValue(headers.Descriptor, "custom1"));
// ContainsParsedValue() must trigger raw value parsing for the header it was asked for.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForExistingHeaderWithAvailableValue_ReturnsTrue()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "3");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "4");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.True(headers.ContainsParsedValue(headers.Descriptor, parsedPrefix + "3"));
// ContainsParsedValue() must trigger raw value parsing for the header it was asked for.
Assert.Equal(4, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void ContainsParsedValue_AddOneValueToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
Assert.True(headers.ContainsParsedValue(headers.Descriptor, "VALUE"));
Assert.Equal(1, headers.Parser.MockComparer.EqualsCount);
headers.Clear();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
Assert.False(headers.ContainsParsedValue(headers.Descriptor, invalidHeaderValue));
}
[Fact]
public void ContainsParsedValue_AddTwoValuesToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "differentvalue");
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
// Note that since we added 2 values a different code path than in the previous test is used. In this
// case we have stored the values as List<string> internally.
Assert.True(headers.ContainsParsedValue(headers.Descriptor, "VALUE"));
Assert.Equal(2, headers.Parser.MockComparer.EqualsCount);
}
[Fact]
public void ContainsParsedValue_ParserWithNoEqualityComparer_CaseSensitiveComparison()
{
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(noComparerHeader, "lowercasevalue");
// Since we don't provide a comparer, the default string.Equals() is called which is case-sensitive. So
// the following call should return false.
Assert.False(headers.ContainsParsedValue(noComparerHeader, "LOWERCASEVALUE"));
// Now we try to use the correct casing. This should return true.
Assert.True(headers.ContainsParsedValue(noComparerHeader, "lowercasevalue"));
}
[Fact]
public void ContainsParsedValue_CallFromEmptyHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
// Adding an empty value to a known header does not create a store entry.
headers.Add(headers.Descriptor, string.Empty);
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for the known header.");
// This will just return false and not touch the header store.
Assert.False(headers.ContainsParsedValue(headers.Descriptor, "x"),
"Expected 'ContainsParsedValue' to return false.");
}
[Fact]
public void AddHeaders_SourceAndDestinationStoreHaveMultipleHeaders_OnlyHeadersNotInDestinationAreCopiedFromSource()
{
// Add header values to the source store.
MockHeaders source = new MockHeaders();
source.Add("custom1", "source10");
source.Add("custom1", "source11");
source.TryAddWithoutValidation("custom2", "source2");
source.Add(known1Header, rawPrefix + "3");
source.TryAddWithoutValidation(known1Header, rawPrefix + "4");
source.TryAddWithoutValidation(known2Header, rawPrefix + "5");
source.TryAddWithoutValidation(known2Header, invalidHeaderValue);
source.TryAddWithoutValidation(known2Header, rawPrefix + "7");
// This header value gets removed when it gets parsed.
source.TryAddWithoutValidation(known3Header, (string)null);
source.Add(known3Header, string.Empty);
DateTimeOffset known4Value1 = new DateTimeOffset(2010, 6, 15, 18, 31, 34, TimeSpan.Zero);
DateTimeOffset known4Value2 = new DateTimeOffset(2010, 4, 8, 11, 21, 04, TimeSpan.Zero);
source.AddParsedValue(known4Header, known4Value1);
source.AddParsedValue(known4Header, known4Value2);
source.Add("custom5", "source5");
source.TryAddWithoutValidation("custom6", (string)null);
// This header gets added even though it doesn't have a value. Since it's a custom header, we
// assume it supports empty values.
source.TryAddWithoutValidation("custom7", (string)null);
source.Add("custom7", string.Empty);
// Add header values to the destination store.
MockHeaders destination = new MockHeaders();
destination.Add("custom2", "destination1");
destination.Add(known1Header, rawPrefix + "9");
// Now add all headers that are in source but not destination to destination.
destination.AddHeaders(source);
Assert.Equal(8, destination.Count());
Assert.Equal(2, destination.GetValues("custom1").Count());
Assert.Equal("source10", destination.GetValues("custom1").ElementAt(0));
Assert.Equal("source11", destination.GetValues("custom1").ElementAt(1));
// This value was set in destination. The header in source was ignored.
Assert.Equal(1, destination.GetValues("custom2").Count());
Assert.Equal("destination1", destination.GetValues("custom2").First());
// This value was set in destination. The header in source was ignored.
Assert.Equal(1, destination.GetValues(known1Header).Count());
Assert.Equal(parsedPrefix + "9", destination.GetValues(known1Header).First());
// The header in source gets first parsed and then copied to destination. Note that here we have one
// invalid value.
Assert.Equal(3, destination.GetValues(known2Header).Count());
Assert.Equal(parsedPrefix + "5", destination.GetValues(known2Header).ElementAt(0));
Assert.Equal(parsedPrefix + "7", destination.GetValues(known2Header).ElementAt(1));
Assert.Equal(invalidHeaderValue, destination.GetValues(known2Header).ElementAt(2));
// Header 'known3' should not be copied, since it doesn't contain any values.
Assert.False(destination.Contains(known3Header), "'known3' header value count.");
Assert.Equal(2, destination.GetValues(known4Header).Count());
Assert.Equal(known4Value1.ToString(), destination.GetValues(known4Header).ElementAt(0));
Assert.Equal(known4Value2.ToString(), destination.GetValues(known4Header).ElementAt(1));
Assert.Equal("source5", destination.GetValues("custom5").First());
Assert.Equal(string.Empty, destination.GetValues("custom6").First());
// Unlike 'known3', 'custom7' was added even though it only had empty values. The reason is that 'custom7'
// is a custom header so we just add whatever value we get passed in.
Assert.Equal(2, destination.GetValues("custom7").Count());
Assert.Equal("", destination.GetValues("custom7").ElementAt(0));
Assert.Equal("", destination.GetValues("custom7").ElementAt(1));
}
[Fact]
public void AddHeaders_SourceHasEmptyHeaderStore_DestinationRemainsUnchanged()
{
MockHeaders source = new MockHeaders();
MockHeaders destination = new MockHeaders();
destination.Add(known1Header, rawPrefix);
destination.AddHeaders(source);
Assert.Equal(1, destination.Count());
}
[Fact]
public void AddHeaders_DestinationHasEmptyHeaderStore_DestinationHeaderStoreGetsCreatedAndValuesAdded()
{
MockHeaders source = new MockHeaders();
source.Add(known1Header, rawPrefix);
MockHeaders destination = new MockHeaders();
destination.AddHeaders(source);
Assert.Equal(1, destination.Count());
}
[Fact]
public void AddHeaders_SourceHasInvalidHeaderValues_InvalidHeadersRemovedFromSourceAndNotCopiedToDestination()
{
MockHeaders source = new MockHeaders();
source.TryAddWithoutValidation(known1Header, invalidHeaderValue + "\r\ninvalid");
source.TryAddWithoutValidation("custom", "invalid\r\nvalue");
MockHeaders destination = new MockHeaders();
destination.AddHeaders(source);
Assert.Equal(0, source.Count());
Assert.False(source.Contains(known1Header), "source contains 'known' header.");
Assert.False(source.Contains("custom"), "source contains 'custom' header.");
Assert.Equal(0, destination.Count());
Assert.False(destination.Contains(known1Header), "destination contains 'known' header.");
Assert.False(destination.Contains("custom"), "destination contains 'custom' header.");
}
[Fact]
public void AddHeaders_ResponseHeaderToRequestHeaders_Success()
{
const string Name = "WWW-Authenticate";
const string Value = "Basic realm=\"Access to the staging site\", charset=\"UTF-8\"";
var request = new HttpRequestMessage();
Assert.True(request.Headers.TryAddWithoutValidation(Name, Value));
Assert.True(request.Headers.Contains(Name));
Assert.True(request.Headers.NonValidated.Contains(Name));
Assert.True(request.Headers.TryGetValues(Name, out IEnumerable<string> values));
Assert.Equal(Value, values.Single());
Assert.True(request.Headers.NonValidated.TryGetValues(Name, out HeaderStringValues nvValues));
Assert.Equal(Value, nvValues.Single());
}
[Fact]
public void AddHeaders_RequestHeaderToResponseHeaders_Success()
{
const string Name = "Referer";
const string Value = "https://dot.net";
var response = new HttpResponseMessage();
Assert.True(response.Headers.TryAddWithoutValidation(Name, Value));
Assert.True(response.Headers.Contains(Name));
Assert.True(response.Headers.NonValidated.Contains(Name));
Assert.True(response.Headers.TryGetValues(Name, out IEnumerable<string> values));
Assert.Equal(Value, values.Single());
Assert.True(response.Headers.NonValidated.TryGetValues(Name, out HeaderStringValues nvValues));
Assert.Equal(Value, nvValues.Single());
}
[Fact]
public void HeaderStringValues_Default_Empty()
{
HeaderStringValues v = default;
Assert.Equal(0, v.Count);
Assert.Empty(v);
Assert.Equal(string.Empty, v.ToString());
}
[Fact]
public void HeaderStringValues_Constructed_ProducesExpectedResults()
{
// 0 strings
foreach (HeaderStringValues hsv in new[] { new HeaderStringValues(KnownHeaders.Accept.Descriptor, Array.Empty<string>()) })
{
Assert.Equal(0, hsv.Count);
HeaderStringValues.Enumerator e = hsv.GetEnumerator();
Assert.False(e.MoveNext());
Assert.Equal(string.Empty, hsv.ToString());
}
// 1 string
foreach (HeaderStringValues hsv in new[] { new HeaderStringValues(KnownHeaders.Accept.Descriptor, "hello"), new HeaderStringValues(KnownHeaders.Accept.Descriptor, new[] { "hello" }) })
{
Assert.Equal(1, hsv.Count);
HeaderStringValues.Enumerator e = hsv.GetEnumerator();
Assert.True(e.MoveNext());
Assert.Equal("hello", e.Current);
Assert.False(e.MoveNext());
Assert.Equal("hello", hsv.ToString());
}
// 2 strings
foreach (HeaderStringValues hsv in new[] { new HeaderStringValues(KnownHeaders.Accept.Descriptor, new[] { "hello", "world" }) })
{
Assert.Equal(2, hsv.Count);
HeaderStringValues.Enumerator e = hsv.GetEnumerator();
Assert.True(e.MoveNext());
Assert.Equal("hello", e.Current);
Assert.True(e.MoveNext());
Assert.Equal("world", e.Current);
Assert.False(e.MoveNext());
Assert.Equal("hello, world", hsv.ToString());
}
}
[Theory]
[MemberData(nameof(NumberOfHeadersUpToArrayThreshold_AddNonValidated_EnumerateNonValidated))]
public void Add_WithinArrayThresholdHeaders_EnumerationPreservesOrdering(int numberOfHeaders, bool addNonValidated, bool enumerateNonValidated)
{
var headers = new MockHeaders();
for (int i = 0; i < numberOfHeaders; i++)
{
if (addNonValidated)
{
headers.TryAddWithoutValidation(i.ToString(), i.ToString());
}
else
{
headers.Add(i.ToString(), i.ToString());
}
}
KeyValuePair<string, string>[] entries = enumerateNonValidated
? headers.NonValidated.Select(pair => KeyValuePair.Create(pair.Key, Assert.Single(pair.Value))).ToArray()
: headers.Select(pair => KeyValuePair.Create(pair.Key, Assert.Single(pair.Value))).ToArray();
Assert.Equal(numberOfHeaders, entries.Length);
for (int i = 0; i < numberOfHeaders; i++)
{
Assert.Equal(i.ToString(), entries[i].Key);
Assert.Equal(i.ToString(), entries[i].Value);
}
}
[Fact]
public void Add_Remove_HeaderOrderingIsPreserved()
{
var headers = new MockHeaders();
headers.Add("a", "");
headers.Add("b", "");
headers.Add("c", "");
headers.Remove("b");
Assert.Equal(new[] { "a", "c" }, headers.Select(pair => pair.Key));
}
[Fact]
public void Add_AddToExistingKey_OriginalOrderingIsPreserved()
{
var headers = new MockHeaders();
headers.Add("a", "a1");
headers.Add("b", "b1");
headers.Add("a", "a2");
Assert.Equal(new[] { "a", "b" }, headers.Select(pair => pair.Key));
}
[Theory]
[InlineData(3)]
[InlineData(4)]
[InlineData(5)]
[InlineData(HttpHeaders.ArrayThreshold / 4)]
[InlineData(HttpHeaders.ArrayThreshold / 2)]
[InlineData(HttpHeaders.ArrayThreshold - 1)]
[InlineData(HttpHeaders.ArrayThreshold)]
[InlineData(HttpHeaders.ArrayThreshold + 1)]
[InlineData(HttpHeaders.ArrayThreshold * 2)]
[InlineData(HttpHeaders.ArrayThreshold * 4)]
public void Add_LargeNumberOfHeaders_OperationsStillSupported(int numberOfHeaders)
{
string[] keys = Enumerable.Range(1, numberOfHeaders).Select(i => i.ToString()).ToArray();
var headers = new MockHeaders();
foreach (string key in keys)
{
Assert.False(headers.NonValidated.Contains(key));
headers.TryAddWithoutValidation(key, key);
Assert.True(headers.NonValidated.Contains(key));
}
string[] nonValidatedKeys = headers.NonValidated.Select(pair => pair.Key).ToArray();
Assert.Equal(numberOfHeaders, nonValidatedKeys.Length);
string[] newKeys = headers.Select(pair => pair.Key).ToArray();
Assert.Equal(numberOfHeaders, newKeys.Length);
string[] nonValidatedKeysAfterValidation = headers.NonValidated.Select(pair => pair.Key).ToArray();
Assert.Equal(numberOfHeaders, nonValidatedKeysAfterValidation.Length);
if (numberOfHeaders > HttpHeaders.ArrayThreshold)
{
// Ordering is lost when adding more than ArrayThreshold headers
Array.Sort(nonValidatedKeys, (a, b) => int.Parse(a).CompareTo(int.Parse(b)));
Array.Sort(newKeys, (a, b) => int.Parse(a).CompareTo(int.Parse(b)));
Array.Sort(nonValidatedKeysAfterValidation, (a, b) => int.Parse(a).CompareTo(int.Parse(b)));
}
Assert.Equal(keys, nonValidatedKeys);
Assert.Equal(keys, newKeys);
Assert.Equal(keys, nonValidatedKeysAfterValidation);
headers.Add("3", "secondValue");
Assert.True(headers.TryGetValues("3", out IEnumerable<string> valuesFor3));
Assert.Equal(new[] { "3", "secondValue" }, valuesFor3);
Assert.True(headers.TryAddWithoutValidation("invalid", "invalid\nvalue"));
Assert.True(headers.TryAddWithoutValidation("valid", "validValue"));
Assert.Equal(numberOfHeaders + 2, headers.NonValidated.Count);
// Remove all headers except for "1", "valid", "invalid"
for (int i = 2; i <= numberOfHeaders; i++)
{
Assert.True(headers.Remove(i.ToString()));
}
Assert.False(headers.Remove("3"));
// "1", "invalid", "valid"
Assert.True(headers.NonValidated.Contains("invalid"));
Assert.Equal(3, headers.NonValidated.Count);
Assert.Equal(new[] { "1", "valid" }, headers.Select(pair => pair.Key).OrderBy(i => i));
Assert.Equal(2, headers.NonValidated.Count);
headers.Clear();
Assert.Equal(0, headers.NonValidated.Count);
Assert.Empty(headers);
Assert.False(headers.Contains("3"));
Assert.True(headers.TryAddWithoutValidation("3", "newValue"));
Assert.True(headers.TryGetValues("3", out valuesFor3));
Assert.Equal(new[] { "newValue" }, valuesFor3);
}
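// Yields (numberOfHeaders, addNonValidated, enumerateNonValidated) combinations for header counts
// from 0 up to HttpHeaders.ArrayThreshold.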
public static IEnumerable<object[]> NumberOfHeadersUpToArrayThreshold_AddNonValidated_EnumerateNonValidated()
{
for (int i = 0; i <= HttpHeaders.ArrayThreshold; i++)
{
yield return new object[] { i, false, false };
yield return new object[] { i, false, true };
yield return new object[] { i, true, false };
yield return new object[] { i, true, true };
}
}
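// Header names containing whitespace or separator characters are not valid tokens and are rejected.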
public static IEnumerable<object[]> GetInvalidHeaderNames()
{
yield return new object[] { "invalid header" };
yield return new object[] { "invalid\theader" };
yield return new object[] { "invalid\rheader" };
yield return new object[] { "invalid\nheader" };
yield return new object[] { "invalid(header" };
yield return new object[] { "invalid)header" };
yield return new object[] { "invalid<header" };
yield return new object[] { "invalid>header" };
yield return new object[] { "invalid@header" };
yield return new object[] { "invalid,header" };
yield return new object[] { "invalid;header" };
yield return new object[] { "invalid:header" };
yield return new object[] { "invalid\\header" };
yield return new object[] { "invalid\"header" };
yield return new object[] { "invalid/header" };
yield return new object[] { "invalid[header" };
yield return new object[] { "invalid]header" };
yield return new object[] { "invalid?header" };
yield return new object[] { "invalid=header" };
yield return new object[] { "invalid{header" };
yield return new object[] { "invalid}header" };
}
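// Yields header values containing CR, LF, or CRLF in various positions, optionally preceded by a valid value.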
public static IEnumerable<object[]> HeaderValuesWithNewLines()
{
foreach (string pattern in new[] { "*", "*foo", "* foo", "foo*", "foo* ", "foo*bar", "foo* bar" })
foreach (string newLine in new[] { "\r", "\n", "\r\n" })
foreach (string prefix in new[] { "", "valid, " })
{
yield return new object[] { prefix + pattern.Replace("*", newLine) };
}
}
#region Helper methods
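// Test double for HttpHeaders that exposes a single known header ("known") backed by
// MockHeaderParser, so tests can observe when and how often raw values get parsed.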
private class MockHeaders : HttpHeaders
{
private MockHeaderParser _parser;
private HeaderDescriptor _descriptor;
public MockHeaderParser Parser => _parser;
public HeaderDescriptor Descriptor => _descriptor;
public MockHeaders(MockHeaderParser parser)
: base()
{
_parser = parser;
_descriptor = (new KnownHeader("known", HttpHeaderType.General, parser)).Descriptor;
}
public MockHeaders()
: this(new MockHeaderParser())
{
}
}
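// Parser used by MockHeaders: it "parses" values by replacing the 'raw' prefix with the 'parsed'
// prefix, fails to parse values starting with 'invalid', and counts TryParseValue calls.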
private class MockHeaderParser : HttpHeaderParser
{
public int TryParseValueCallCount { get; set; }
public int EmptyValueCount { get; private set; }
public MockComparer MockComparer { get; private set; }
public MockHeaderParser()
: this(true)
{
}
public MockHeaderParser(bool supportsMultipleValues)
: base(supportsMultipleValues)
{
this.MockComparer = new MockComparer();
}
public MockHeaderParser(string separator)
: base(true, separator)
{
this.MockComparer = new MockComparer();
}
#region IHeaderParser Members
public override IEqualityComparer Comparer
{
get { return MockComparer; }
}
public override bool TryParseValue(string value, object storeValue, ref int index, out object parsedValue)
{
TryParseValueCallCount++;
return TryParseValueCore(value, ref index, out parsedValue);
}
private bool TryParseValueCore(string value, ref int index, out object parsedValue)
{
parsedValue = null;
if (value == null)
{
return true;
}
if (value == string.Empty)
{
EmptyValueCount++;
return true;
}
int separatorIndex = value.IndexOf(',', index);
// Just fail if we don't support multiple values and the value is actually a list of values.
if ((!SupportsMultipleValues) && (separatorIndex >= 0))
{
return false;
}
if (separatorIndex == -1)
{
// If the raw string just contains one value, then use the whole string.
separatorIndex = value.Length;
}
string tempValue = value.Substring(index, separatorIndex - index);
if (tempValue.StartsWith(rawPrefix, StringComparison.Ordinal))
{
index = Math.Min(separatorIndex + 1, value.Length);
// We "parse" the value by replacing 'rawPrefix' strings with 'parsedPrefix' string.
parsedValue = parsedPrefix + tempValue.Substring(rawPrefix.Length,
tempValue.Length - rawPrefix.Length);
return true;
}
// The only thing left is a deliberately chosen invalid value.
Assert.StartsWith(invalidHeaderValue, tempValue, StringComparison.Ordinal);
return false;
}
#endregion
}
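// Case-insensitive string comparer that records how often Equals/GetHashCode are called, so tests
// can verify the store delegates value comparisons to the parser's comparer.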
private class MockComparer : IEqualityComparer
{
public int GetHashCodeCount { get; private set; }
public int EqualsCount { get; private set; }
#region IEqualityComparer Members
public new bool Equals(object x, object y)
{
Assert.NotNull(x);
Assert.NotNull(y);
EqualsCount++;
string xs = x as string;
string ys = y as string;
if ((xs != null) && (ys != null))
{
return string.Equals(xs, ys, StringComparison.OrdinalIgnoreCase);
}
return x.Equals(y);
}
public int GetHashCode(object obj)
{
GetHashCodeCount++;
return obj.GetHashCode();
}
#endregion
}
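// Plain HttpHeaders subclass used with headers whose parsed values are custom (non-string) types.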
private class CustomTypeHeaders : HttpHeaders
{
public CustomTypeHeaders()
{
}
}
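// Parser that only supplies a comparer (CustomTypeComparer); the tests that use it never go through
// parsing, so TryParseValue deliberately throws.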
private class CustomTypeHeaderParser : HttpHeaderParser
{
private static readonly CustomTypeComparer comparer = new CustomTypeComparer();
public override IEqualityComparer Comparer
{
get { return comparer; }
}
public CustomTypeHeaderParser()
: base(true)
{
}
public override bool TryParseValue(string value, object storeValue, ref int index, out object parsedValue)
{
throw new NotImplementedException();
}
}
private class CustomTypeComparer : IEqualityComparer
{
#region IEqualityComparer Members
public new bool Equals(object x, object y)
{
Assert.NotNull(x);
Assert.NotNull(y);
return x.Equals(y);
}
public int GetHashCode(object obj)
{
Assert.NotNull(obj);
return obj.GetHashCode();
}
#endregion
}
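// Parser that deliberately provides no comparer, so the store falls back to the default
// (case-sensitive) Equals when comparing parsed values.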
private class NoComparerHeaderParser : HttpHeaderParser
{
public NoComparerHeaderParser()
: base(true)
{
}
public override bool TryParseValue(string value, object storeValue, ref int index, out object parsedValue)
{
throw new NotImplementedException();
}
}
#endregion
}
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Net.Http.Headers;
using System.Tests;
using Xunit;
namespace System.Net.Http.Tests
{
public class HttpHeadersTest
{
// Note: These are not real known headers, so they won't be returned if we call HeaderDescriptor.Get().
private static readonly HeaderDescriptor known1Header = (new KnownHeader("known1", HttpHeaderType.General, new MockHeaderParser())).Descriptor;
private static readonly HeaderDescriptor known2Header = (new KnownHeader("known2", HttpHeaderType.General, new MockHeaderParser())).Descriptor;
private static readonly HeaderDescriptor known3Header = (new KnownHeader("known3", HttpHeaderType.General, new MockHeaderParser())).Descriptor;
private static readonly HeaderDescriptor known4Header = (new KnownHeader("known4", HttpHeaderType.General, new CustomTypeHeaderParser())).Descriptor;
private static readonly HeaderDescriptor noComparerHeader = (new KnownHeader("noComparerHeader", HttpHeaderType.General, new NoComparerHeaderParser())).Descriptor;
private static readonly HeaderDescriptor customTypeHeader = (new KnownHeader("customTypeHeader", HttpHeaderType.General, new CustomTypeHeaderParser())).Descriptor;
private static readonly HeaderDescriptor customHeader;
static HttpHeadersTest()
{
HeaderDescriptor.TryGet("custom", out customHeader);
}
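// Convention used by MockHeaderParser: values starting with 'raw' are parsed into the same value
// with the 'raw' prefix replaced by 'parsed', and values starting with 'invalid' fail to parse.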
private const string customHeaderName = "custom-header";
private const string rawPrefix = "raw";
private const string parsedPrefix = "parsed";
private const string invalidHeaderValue = "invalid";
[Theory]
[InlineData(null)]
[InlineData("")]
public void TryAddWithoutValidation_UseEmptyHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.TryAddWithoutValidation(headerName, "value"));
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void TryAddWithoutValidation_UseInvalidHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.TryAddWithoutValidation(headerName, "value"));
}
[Fact]
public void TryAddWithoutValidation_AddSingleValue_ValueParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.First());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoSingleValues_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoValidValuesAsOneString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
// The parser gets called for each value in the raw string. I.e. if we have 1 raw string containing two
// values, the parser gets called twice.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
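        // Illustrative sketch (editor's addition, not part of the original suite): the deferred-parsing
        // behavior exercised above with MockHeaders can also be observed on the public HttpRequestHeaders
        // type. The "Accept" header is used only because it is a well-known multi-value header; any
        // comma-separated known header would demonstrate the same point.
        [Fact]
        public void Sketch_TryAddWithoutValidation_DefersParsingUntilAccess()
        {
            HttpRequestHeaders headers = new HttpRequestMessage().Headers;
            // The raw string is stored as-is; no parser runs at this point.
            Assert.True(headers.TryAddWithoutValidation("Accept", "text/html, application/json"));
            // Accessing the strongly typed collection forces parsing into two media types.
            Assert.Equal(2, headers.Accept.Count);
        }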
[Fact]
public void TryAddWithoutValidation_AddTwoValuesOneValidOneInvalidAsOneString_RawStringAddedAsInvalid()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + invalidHeaderValue);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
// We expect the value to be returned without change since it couldn't be parsed in its entirety.
Assert.Equal(rawPrefix + "1," + invalidHeaderValue, headers.First().Value.ElementAt(0));
// The parser gets called twice, but the second time it returns false, because it tries to parse
// 'invalidHeaderValue'.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddTwoValueStringAndThirdValue_AllValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "3");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddInvalidAndValidValueString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddEmptyValueString_HeaderWithNoValueAfterParsing()
{
MockHeaders headers = new MockHeaders();
            // The parser returns 'true' to indicate that it could parse the value (empty values allowed) and a
            // parsed value of 'null'. HttpHeaders will remove the header from the collection since the known
            // header doesn't have a value.
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(0, headers.Count());
headers.Clear();
headers.TryAddWithoutValidation("custom", (string)null);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(string.Empty, headers.GetValues("custom").First());
}
[Fact]
public void TryAddWithoutValidation_AddValidAndInvalidValueString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
// If you compare this test with the previous one: Note that we reversed the order of adding the invalid
// string and the valid string. However, when enumerating header values the order is still the same as in
// the previous test.
// We don't keep track of the order if we have both invalid & valid values. This would add complexity
            // and additional memory to store the information. Given how rare this scenario is, we consider this
            // behavior acceptable by design. Note that this scenario is only an issue if:
// - The header value has an invalid format (very rare for standard headers) AND
// - There are multiple header values (some valid, some invalid) AND
// - The order of the headers matters (e.g. Transfer-Encoding)
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
string expected = headers.Descriptor.Name + ": " + parsedPrefix + ", " + invalidHeaderValue + Environment.NewLine;
Assert.Equal(expected, headers.ToString());
}
[Fact]
public void TryAddWithoutValidation_AddNullValueForKnownHeader_ParserRejectsNullEmptyStringAdded()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, (string)null);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
// MockParser is called with an empty string and decides that it is OK to have empty values but they
// shouldn't be added to the list of header values. HttpHeaders will remove the header since it doesn't
// have values.
Assert.Equal(0, headers.Count());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddNullValueForUnknownHeader_EmptyStringAddedAsValue()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, (string)null);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
// 'null' values are internally stored as string.Empty. Since we added a custom header, there is no
// parser and the empty string is just added to the list of 'parsed values'.
Assert.Equal(string.Empty, headers.First().Value.First());
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddValueForUnknownHeader_ValueAddedToStore()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, "custom value");
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("custom value", headers.First().Value.First());
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddNullAndEmptyValuesToKnownHeader_HeaderRemovedFromCollection()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, (string)null);
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(0, headers.Count());
            // TryAddWithoutValidation() adds 'null' as string.Empty to distinguish between an empty raw value and no raw
// value. When the parser is called later, the parser can decide whether empty strings are valid or not.
// In our case the MockParser returns 'success' with a parsed value of 'null' indicating that it is OK to
// have empty values, but they should be ignored.
Assert.Equal(2, headers.Parser.EmptyValueCount);
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddNullAndEmptyValuesToUnknownHeader_TwoEmptyStringsAddedAsValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, (string)null);
headers.TryAddWithoutValidation(customHeaderName, string.Empty);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
            // TryAddWithoutValidation() adds 'null' as string.Empty to distinguish between an empty raw value and no raw
// value. For custom headers we just add what the user gives us. I.e. the result is a header with two empty
// values.
Assert.Equal(string.Empty, headers.First().Value.ElementAt(0));
Assert.Equal(string.Empty, headers.First().Value.ElementAt(1));
}
[Fact]
public void TryAddWithoutValidation_AddMultipleValueToSingleValueHeaders_FirstHeaderAddedOthersAreInvalid()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
// Note that the first value was parsed and added to the 'parsed values' list. The second value however
// was added to the 'invalid values' list since the header doesn't support multiple values.
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(rawPrefix + "2", headers.First().Value.ElementAt(1));
// The parser is only called once for the first value. HttpHeaders doesn't invoke the parser for
// additional values if the parser only supports one value.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void TryAddWithoutValidation_AddMultipleValueStringToSingleValueHeaders_MultipleValueStringAddedAsInvalid()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
// Since parsing the header value fails because it is composed of 2 values, the original string is added
// to the list of 'invalid values'. Therefore we only have 1 header value (the original string).
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(rawPrefix + "1," + rawPrefix + "2", headers.First().Value.First());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Theory]
[MemberData(nameof(HeaderValuesWithNewLines))]
public void TryAddWithoutValidation_AddValueContainingNewLine_Rejected(string headerValue)
{
var headers = new HttpRequestHeaders();
// This value is considered invalid (newline char followed by non-whitespace). However, since
// TryAddWithoutValidation() only causes the header value to be analyzed when it gets actually accessed, no
// exception is thrown. Instead the value is discarded and a warning is logged.
headers.TryAddWithoutValidation("foo", headerValue);
Assert.Equal(1, headers.NonValidated.Count);
Assert.Equal(headerValue, headers.NonValidated["foo"].ToString());
Assert.False(headers.Contains("foo"));
Assert.Equal(0, headers.Count());
// Accessing the header forces parsing and the invalid value is removed
Assert.Equal(0, headers.NonValidated.Count);
headers.Clear();
headers.TryAddWithoutValidation("foo", new[] { "valid", headerValue });
Assert.Equal(1, headers.NonValidated.Count);
HeaderStringValues values = headers.NonValidated["foo"];
Assert.Equal(2, values.Count);
Assert.Equal(new[] { "valid", headerValue }, values);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("valid", headers.First().Value.First());
// Accessing the header forces parsing and the invalid value is removed
Assert.Equal(1, headers.NonValidated.Count);
values = headers.NonValidated["foo"];
Assert.Equal(1, values.Count);
Assert.Equal("valid", values.ToString());
}
[Fact]
public void TryAddWithoutValidation_MultipleAddInvalidValuesToNonExistingHeader_AddHeader()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, new string[] { invalidHeaderValue });
            // The header does get added: TryAddWithoutValidation() keeps the invalid value as an 'invalid value'.
Assert.True(headers.Contains(headers.Descriptor));
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(0));
}
[Fact]
public void TryAddWithoutValidation_MultipleAddValidValueThenAddInvalidValuesToExistingHeader_AddValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, new string[] { rawPrefix + "2", invalidHeaderValue });
Assert.True(headers.Contains(headers.Descriptor));
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(2));
}
[Fact]
public void TryAddWithoutValidation_MultipleAddValidValueThenAddInvalidValuesToNonExistingHeader_AddHeader()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, new string[] { rawPrefix + "1", invalidHeaderValue });
Assert.True(headers.Contains(headers.Descriptor));
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
}
[Fact]
public void TryAddWithoutValidation_MultipleAddNullValueCollection_Throws()
{
MockHeaders headers = new MockHeaders();
string[] values = null;
Assert.Throws<ArgumentNullException>(() => { headers.TryAddWithoutValidation(headers.Descriptor, values); });
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Add_SingleUseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.Add(headerName, "value"); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void Add_SingleUseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headerName, "value"); });
}
[Fact]
public void Add_SingleUseStoreWithNoParserStore_AllHeadersConsideredCustom()
{
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.Add("custom", "value");
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("value", headers.First().Value.First());
}
[Fact]
public void Add_SingleAddValidValue_ValueParsedCorrectly()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
// Add() should trigger parsing.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddEmptyValueMultipleTimes_EmptyHeaderAdded()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, (string)null);
headers.Add(headers.Descriptor, string.Empty);
headers.Add(headers.Descriptor, string.Empty);
// Add() should trigger parsing.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(0, headers.Count());
}
[Fact]
public void Add_SingleAddInvalidValueToNonExistingHeader_ThrowAndDontAddHeader()
{
// Since Add() immediately parses the value, it will throw an exception if the value is invalid.
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, invalidHeaderValue); });
// Make sure the header did not get added to the store.
Assert.False(headers.Contains(headers.Descriptor),
"No header expected to be added since header value was invalid.");
}
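        // Illustrative sketch (editor's addition): the eager-validation behavior shown above also applies
        // to the public header collections. "Date" is used only as an example of a known header with a
        // strict format; Add() validates immediately while TryAddWithoutValidation() defers the check.
        [Fact]
        public void Sketch_Add_InvalidKnownHeaderValue_ThrowsImmediately()
        {
            HttpRequestHeaders headers = new HttpRequestMessage().Headers;
            // Add() parses eagerly, so a malformed date is rejected right away.
            Assert.Throws<FormatException>(() => headers.Add("Date", "not a date"));
            // TryAddWithoutValidation() stores the same raw string without complaint.
            Assert.True(headers.TryAddWithoutValidation("Date", "not a date"));
        }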
[Fact]
public void Add_SingleAddValidValueThenAddInvalidValue_ThrowAndHeaderContainsValidValue()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, invalidHeaderValue); });
// Make sure the header did not get removed due to the failed add.
Assert.True(headers.Contains(headers.Descriptor), "Header was removed even if there is a valid header value.");
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix, headers.First().Value.ElementAt(0));
}
[Fact]
public void Add_MultipleAddInvalidValuesToNonExistingHeader_ThrowAndDontAddHeader()
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, new string[] { invalidHeaderValue }); });
// Make sure the header did not get added since we just tried to add an invalid value.
Assert.False(headers.Contains(headers.Descriptor), "Header was added even if we just added an invalid value.");
}
[Fact]
public void Add_MultipleAddValidValueThenAddInvalidValuesToExistingHeader_ThrowAndDontAddHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, new string[] { rawPrefix + "2", invalidHeaderValue }); });
// Make sure the header did not get removed due to the failed add. Note that the first value in the array
// is valid, so it gets added. I.e. we have 2 values.
Assert.True(headers.Contains(headers.Descriptor), "Header was removed even if there is a valid header value.");
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
}
[Fact]
public void Add_MultipleAddValidValueThenAddInvalidValuesToNonExistingHeader_ThrowAndDontAddHeader()
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, new string[] { rawPrefix + "1", invalidHeaderValue }); });
// Make sure the header got added due to the valid add. Note that the first value in the array
// is valid, so it gets added.
Assert.True(headers.Contains(headers.Descriptor), "Header was not added even though we added 1 valid value.");
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
}
[Fact]
public void Add_SingleAddThreeValidValues_ValuesParsedCorrectly()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
headers.Add(headers.Descriptor, rawPrefix + "3");
// Add() should trigger parsing.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddTwoValidValuesToHeaderWithSingleValue_Throw()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
            // A single-value header can hold only one value, so the second Add() throws.
Assert.Throws<FormatException>(() => { headers.Add(headers.Descriptor, rawPrefix + "2"); });
// Verify that the first header value is still there.
Assert.Equal(1, headers.First().Value.Count());
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationForValidValueThenAdd_TwoParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Add() should trigger parsing. TryAddWithoutValidation() doesn't trigger parsing, but Add() triggers
// parsing of raw header values (TryParseValue() is called)
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationForInvalidValueThenAdd_TwoParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
headers.Add(headers.Descriptor, rawPrefix + "1");
// Add() should trigger parsing. TryAddWithoutValidation() doesn't trigger parsing, but Add() triggers
// parsing of raw header values (TryParseValue() is called)
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(invalidHeaderValue, headers.First().Value.ElementAt(1));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationForEmptyValueThenAdd_OneParsedValueAddedEmptyIgnored()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
headers.Add(headers.Descriptor, rawPrefix + "1");
// Add() should trigger parsing. TryAddWithoutValidation() doesn't trigger parsing, but Add() triggers
// parsing of raw header values (TryParseValue() is called)
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstAddThenTryAddWithoutValidation_TwoParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// Add() should trigger parsing. Since TryAddWithoutValidation() is called afterwards the second value is
// not parsed yet.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddThenTryAddWithoutValidationThenAdd_ThreeParsedValuesAdded()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.Add(headers.Descriptor, rawPrefix + "3");
// The second Add() triggers also parsing of the value added by TryAddWithoutValidation()
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleFirstTryAddWithoutValidationThenAddToSingleValueHeader_AddThrows()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
Assert.Throws<FormatException>(() => {headers.Add(headers.Descriptor, rawPrefix + "2"); });
}
[Fact]
public void Add_SingleFirstAddThenTryAddWithoutValidationToSingleValueHeader_BothParsedAndInvalidValue()
{
MockHeaderParser parser = new MockHeaderParser(false); // doesn't support multiple values.
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Add() succeeds since we don't have a value added yet. TryAddWithoutValidation() also succeeds, however
// the value is added to the 'invalid values' list when retrieved.
Assert.Equal(1, headers.Count());
Assert.Equal(2, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(rawPrefix + "2", headers.First().Value.ElementAt(1));
// Note that TryParseValue() is not called because HttpHeaders sees that there is already a value
// so it adds the raw value to 'invalid values'.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_MultipleAddThreeValidValuesWithOneCall_ValuesParsedCorrectly()
{
MockHeaders headers = new MockHeaders();
string[] values = new string[] { rawPrefix + "1", rawPrefix + "2", rawPrefix + "3" };
headers.Add(headers.Descriptor, values);
// Add() should trigger parsing.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
// Value is already parsed. There shouldn't be additional calls to the parser.
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_MultipleAddThreeValidValuesAsOneString_BothValuesParsed()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1," + rawPrefix + "2," + rawPrefix + "3");
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(3, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
}
[Fact]
public void Add_MultipleAddNullValueCollection_Throw()
{
MockHeaders headers = new MockHeaders();
string[] values = null;
Assert.Throws<ArgumentNullException>(() => { headers.Add(headers.Descriptor, values); });
}
[Fact]
public void Add_SingleAddCustomHeaderWithNullValue_HeaderIsAddedWithEmptyStringValue()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeaderName, (string)null);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(string.Empty, headers.First().Value.ElementAt(0));
// We're using a custom header. No parsing should be triggered.
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Add_SingleAddHeadersWithDifferentCasing_ConsideredTheSameHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom-header", "value1");
headers.Add("Custom-Header", "value2");
headers.Add("CUSTOM-HEADER", "value2");
Assert.Equal(3, headers.GetValues("custom-header").Count());
Assert.Equal(3, headers.GetValues("Custom-Header").Count());
Assert.Equal(3, headers.GetValues("CUSTOM-HEADER").Count());
Assert.Equal(3, headers.GetValues("CuStOm-HeAdEr").Count());
}
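        // Illustrative sketch (editor's addition): header-name case-insensitivity is not specific to
        // MockHeaders; the public HttpRequestHeaders type folds differently cased names into one entry.
        // The custom header name below is arbitrary.
        [Fact]
        public void Sketch_HeaderNames_AreCaseInsensitive()
        {
            HttpRequestHeaders headers = new HttpRequestMessage().Headers;
            headers.Add("X-Custom-Header", "value1");
            headers.Add("x-custom-header", "value2");
            // Both values end up under a single header entry, regardless of the casing used to query it.
            Assert.Equal(2, headers.GetValues("X-CUSTOM-HEADER").Count());
        }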
[Theory]
[MemberData(nameof(HeaderValuesWithNewLines))]
public void Add_AddValueContainingNewLine_Rejected(string headerValue)
{
var headers = new HttpRequestHeaders();
Assert.Throws<FormatException>(() => headers.Add("foo", headerValue));
Assert.Equal(0, headers.Count());
Assert.Equal(0, headers.NonValidated.Count);
headers.Clear();
Assert.Throws<FormatException>(() => headers.Add("foo", new[] { "valid", headerValue }));
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal("valid", headers.First().Value.First());
Assert.Equal(1, headers.NonValidated.Count);
Assert.Equal("valid", headers.NonValidated["foo"].ToString());
}
[Fact]
public void RemoveParsedValue_AddValueAndRemoveIt_NoHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
// Remove the parsed value (note the original string 'raw1' was "parsed" to 'parsed1')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(0, headers.Count());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Remove the value again: It shouldn't be found in the store.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
}
[Fact]
public void RemoveParsedValue_AddInvalidValueAndRemoveValidValue_InvalidValueRemains()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
// Remove a valid value which is not in the store.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix));
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
            // Nothing was removed, so the header (still holding the invalid value) remains in the collection.
Assert.Equal(1, headers.Count());
Assert.Equal(invalidHeaderValue, headers.GetValues(headers.Descriptor).First());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// Remove the value again: It shouldn't be found in the store.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
}
[Fact]
public void RemoveParsedValue_ParserWithNoEqualityComparer_CaseSensitiveComparison()
{
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(noComparerHeader, "lowercasevalue");
// Since we don't provide a comparer, the default string.Equals() is called which is case-sensitive. So
// the following call should return false.
Assert.False(headers.RemoveParsedValue(noComparerHeader, "LOWERCASEVALUE"));
// Now we try to remove the value using the correct casing. This should work.
Assert.True(headers.RemoveParsedValue(noComparerHeader, "lowercasevalue"));
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(0, headers.Count());
}
[Fact]
public void RemoveParsedValue_AddTwoValuesAndRemoveThem_NoHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Remove the parsed value (note the original string 'raw1' was "parsed" to 'parsed1')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "2"));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(0, headers.Count());
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void RemoveParsedValue_AddTwoValuesAndRemoveFirstOne_SecondValueRemains()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Remove the parsed value (note the original string 'raw1' was "parsed" to 'parsed1')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1"));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(0));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void RemoveParsedValue_AddTwoValuesAndRemoveSecondOne_FirstValueRemains()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add(headers.Descriptor, rawPrefix + "2");
// Remove the parsed value (note the original string 'raw2' was "parsed" to 'parsed2')
Assert.True(headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "2"));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Note that when the last value of a header gets removed, the whole header gets removed.
Assert.Equal(1, headers.Count());
Assert.Equal(1, headers.First().Value.Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void RemoveParsedValue_RemoveFromNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
            // The 'custom' header isn't in the store, so false is returned.
Assert.False(headers.RemoveParsedValue(customHeader, "doesntexist"));
}
[Fact]
public void RemoveParsedValue_RemoveFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
// If we never add a header value, the whole header (and also the header store) doesn't exist.
// Make sure we considered this case.
Assert.False(headers.RemoveParsedValue(headers.Descriptor, "doesntexist"));
}
[Fact]
public void RemoveParsedValue_AddOneValueToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
Assert.True(headers.RemoveParsedValue(headers.Descriptor, "VALUE"));
Assert.False(headers.Contains(headers.Descriptor), "Header should be removed after removing value.");
Assert.Equal(1, headers.Parser.MockComparer.EqualsCount);
}
[Fact]
public void RemoveParsedValue_AddTwoValuesToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "differentvalue");
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
// Note that since we added 2 values a different code path than in the previous test is used. In this
// case we have stored the values as List<string> internally.
Assert.True(headers.RemoveParsedValue(headers.Descriptor, "VALUE"));
Assert.Equal(1, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(2, headers.Parser.MockComparer.EqualsCount);
}
[Fact]
public void RemoveParsedValue_FirstAddNewlineCharsValueThenCallRemoveParsedValue_HeaderRemoved()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.RemoveParsedValue(headers.Descriptor, "");
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for 'knownHeader'.");
}
[Fact]
public void RemoveParsedValue_FirstAddNewlineCharsValueThenAddValidValueThenCallAddParsedValue_HeaderRemoved()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.RemoveParsedValue(headers.Descriptor, parsedPrefix + "1");
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for 'knownHeader'.");
}
[Fact]
public void Clear_AddMultipleHeadersAndThenClear_NoHeadersInCollection()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("custom3", "customValue3");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// We added 4 different headers
Assert.Equal(4, headers.Count());
headers.Clear();
Assert.Equal(0, headers.Count());
// The call to Count() triggers a TryParseValue for the TryAddWithoutValidation() value. Clear() should
// not cause any additional parsing operations.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Remove_UseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.Remove(headerName); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void Remove_UseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Remove(headerName); });
}
[Fact]
public void Remove_AddMultipleHeadersAndDeleteFirstAndLast_FirstAndLastHeaderRemoved()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("lastheader", "customValue3");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
// We added 4 different headers
Assert.Equal(4, headers.Count());
// Remove first header
Assert.True(headers.Remove(headers.Descriptor));
Assert.Equal(3, headers.Count());
// Remove last header
Assert.True(headers.Remove("lastheader"));
Assert.Equal(2, headers.Count());
            // The call to Count() triggers a TryParseValue for the TryAddWithoutValidation() value. Remove() should
            // not cause any additional parsing operations.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Remove_RemoveHeaderFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
// Remove header from uninitialized store (store collection is null)
Assert.False(headers.Remove(headers.Descriptor));
Assert.Equal(0, headers.Count());
}
[Fact]
public void Remove_RemoveNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
Assert.Equal(1, headers.Count());
// Remove header from empty store
Assert.False(headers.Remove("doesntexist"));
Assert.Equal(1, headers.Count());
}
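        // Illustrative sketch (editor's addition): Remove() on the public collections mirrors the behavior
        // verified above and reports via its return value whether anything was actually removed. The
        // header name below is arbitrary.
        [Fact]
        public void Sketch_Remove_ReturnsWhetherHeaderExisted()
        {
            HttpRequestHeaders headers = new HttpRequestMessage().Headers;
            headers.Add("X-Trace-Id", "abc123");
            Assert.True(headers.Remove("X-Trace-Id"));
            // A second Remove() finds nothing and returns false instead of throwing.
            Assert.False(headers.Remove("X-Trace-Id"));
        }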
[Theory]
[InlineData(null)]
[InlineData("")]
public void TryGetValues_UseEmptyHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
IEnumerable<string> values = null;
Assert.False(headers.TryGetValues(headerName, out values));
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void TryGetValues_UseInvalidHeaderName_False(string headerName)
{
MockHeaders headers = new MockHeaders();
IEnumerable<string> values = null;
Assert.False(headers.TryGetValues(headerName, out values));
}
[Fact]
public void TryGetValues_GetValuesFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
IEnumerable<string> values = null;
// Get header values from uninitialized store (store collection is null)
Assert.False(headers.TryGetValues("doesntexist", out values));
Assert.Equal(0, headers.Count());
}
[Fact]
public void TryGetValues_GetValuesForNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
IEnumerable<string> values = null;
            // Get header values for a non-existing header (other headers exist in the store).
Assert.False(headers.TryGetValues("doesntexist", out values));
Assert.Equal(1, headers.Count());
}
[Fact]
public void TryGetValues_GetValuesForExistingHeader_ReturnsTrueAndListOfValues()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.TryAddWithoutValidation(headers.Descriptor, string.Empty);
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
IEnumerable<string> values = null;
Assert.True(headers.TryGetValues(headers.Descriptor, out values));
Assert.NotNull(values);
// TryGetValues() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(3, headers.Parser.TryParseValueCallCount);
Assert.Equal(2, values.Count());
// Check returned values
Assert.Equal(parsedPrefix + "1", values.ElementAt(0));
Assert.Equal(parsedPrefix + "2", values.ElementAt(1));
}
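        // Illustrative sketch (editor's addition): TryGetValues() follows the same try-pattern on the
        // public header collections. "Accept-Language" and "X-Missing" are arbitrary example names.
        [Fact]
        public void Sketch_TryGetValues_PublicCollections()
        {
            HttpRequestHeaders headers = new HttpRequestMessage().Headers;
            headers.Add("Accept-Language", "en-US");
            Assert.True(headers.TryGetValues("Accept-Language", out IEnumerable<string> values));
            Assert.Contains("en-US", values);
            // A header that was never added reports false instead of throwing.
            Assert.False(headers.TryGetValues("X-Missing", out _));
        }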
[Theory]
[InlineData(null)]
[InlineData("")]
public void GetValues_UseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.GetValues(headerName); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void GetValues_UseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.GetValues(headerName); });
}
[Fact]
public void GetValues_GetValuesFromUninitializedHeaderStore_Throw()
{
MockHeaders headers = new MockHeaders();
// Get header values from uninitialized store (store collection is null). This will throw.
Assert.Throws<InvalidOperationException>(() => { headers.GetValues("doesntexist"); });
}
[Fact]
public void GetValues_GetValuesForNonExistingHeader_Throw()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
// Get header values for non-existing header (but other headers exist in the store).
Assert.Throws<InvalidOperationException>(() => { headers.GetValues("doesntexist"); });
}
[Fact]
public void GetValues_GetValuesForExistingHeader_ReturnsTrueAndListOfValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation("custom", rawPrefix + "0"); // this must not influence the result.
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
IEnumerable<string> values = headers.GetValues(headers.Descriptor);
Assert.NotNull(values);
// GetValues() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(2, values.Count());
// Check returned values
Assert.Equal(parsedPrefix + "1", values.ElementAt(0));
Assert.Equal(parsedPrefix + "2", values.ElementAt(1));
}
[Fact]
public void GetValues_HeadersWithEmptyValues_ReturnsEmptyArray()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(customHeaderName, (string)null);
headers.Add(headers.Descriptor, string.Empty);
// In the known header case, the MockParser accepts empty values but tells the store to not add the value.
// Since no value is added for 'knownHeader', HttpHeaders removes the header from the store. This is only
// done for known headers. Custom headers are allowed to have empty/null values as shown by
            // 'valuesForCustomHeader' below.
Assert.False(headers.Contains(headers.Descriptor));
            // In the custom header case, we add whatever the user adds (except that null is stored as
            // string.Empty). So here we do have 1 value: string.Empty.
IEnumerable<string> valuesForCustomHeader = headers.GetValues(customHeaderName);
Assert.NotNull(valuesForCustomHeader);
Assert.Equal(1, valuesForCustomHeader.Count());
Assert.Equal(string.Empty, valuesForCustomHeader.First());
}
[Fact]
public void GetParsedValues_GetValuesFromUninitializedHeaderStore_ReturnsNull()
{
MockHeaders headers = new MockHeaders();
// Get header values from uninitialized store (store collection is null).
object storeValue = headers.GetParsedValues(customHeader);
Assert.Null(storeValue);
}
[Fact]
public void GetParsedValues_GetValuesForNonExistingHeader_ReturnsNull()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
// Get header values for non-existing header (but other headers exist in the store).
object storeValue = headers.GetParsedValues(customHeader);
Assert.Null(storeValue);
}
[Fact]
public void GetParsedValues_GetSingleValueForExistingHeader_ReturnsAddedValue()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeader.Name, "customValue1");
// Get header values for non-existing header (but other headers exist in the store).
object storeValue = headers.GetParsedValues(customHeader);
Assert.NotNull(storeValue);
// If we only have one value, then GetValues() should return just the value and not wrap it in a List<T>.
Assert.Equal("customValue1", storeValue);
}
[Fact]
public void GetParsedValues_HeaderWithEmptyValues_ReturnsEmpty()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, string.Empty);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.Null(storeValue);
}
[Fact]
public void GetParsedValues_GetMultipleValuesForExistingHeader_ReturnsListOfValues()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation("custom", rawPrefix + "0"); // this must not influence the result.
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.NotNull(storeValue);
// GetValues<T>() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
// Since we added 2 values to header 'knownHeader', we expect GetValues() to return a List<T> with
// two values.
List<object> storeValues = storeValue as List<object>;
Assert.NotNull(storeValues);
Assert.Equal(2, storeValues.Count);
Assert.Equal(parsedPrefix + "1", storeValues[0]);
Assert.Equal(parsedPrefix + "2", storeValues[1]);
}
[Fact]
public void GetParsedValues_GetValuesForExistingHeaderWithInvalidValues_ReturnsOnlyParsedValues()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
// Here we add an invalid value. GetValues<T> only returns parsable values. So this value should get
// parsed, however it will be added to the 'invalid values' list and thus is not part of the collection
// returned by the enumerator.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
// Only 1 value should get parsed (call to Add() with known header value).
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.NotNull(storeValue);
// GetValues<T>() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
            // Since we added only one valid value to 'knownHeader', we expect GetValues() to return just that value.
Assert.Equal(parsedPrefix, storeValue);
}
[Fact]
public void GetParsedValues_GetValuesForExistingHeaderWithOnlyInvalidValues_ReturnsEmptyEnumerator()
{
MockHeaders headers = new MockHeaders();
// Here we add an invalid value. GetValues<T> only returns parsable values. So this value should get
// parsed, however it will be added to the 'invalid values' list and thus is not part of the collection
// returned by the enumerator.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.Null(storeValue);
// GetValues<T>() should trigger parsing of values added with TryAddWithoutValidation()
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void GetParsedValues_AddInvalidValueToHeader_HeaderGetsRemovedAndNullReturned()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
object storeValue = headers.GetParsedValues(headers.Descriptor);
Assert.Null(storeValue);
Assert.False(headers.Contains(headers.Descriptor));
}
[Fact]
public void GetParsedValues_GetParsedValuesForKnownHeaderWithNewlineChars_ReturnsNull()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.Null(headers.GetParsedValues(headers.Descriptor));
Assert.Equal(0, headers.Count());
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void NonValidated_Default_Empty()
{
HttpHeadersNonValidated v = default;
Assert.Equal(0, v.Count);
Assert.Empty(v);
Assert.False(v.TryGetValues("Host", out HeaderStringValues values));
Assert.Empty(values);
}
[Fact]
public void NonValidated_SetValidAndInvalidHeaderValues_AllHeaderValuesReturned()
{
MockHeaderParser parser = new MockHeaderParser("---");
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, "value2,value3");
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
string expectedValue = "value2,value3---" + invalidHeaderValue + "---" + parsedPrefix + "1";
Assert.Equal(1, headers.NonValidated.Count);
int iterations = 0;
foreach (KeyValuePair<string, HeaderStringValues> header in headers.NonValidated)
{
// Note that raw values don't get parsed but just added to the result.
iterations++;
Assert.Equal(headers.Descriptor.Name, header.Key);
Assert.Equal(3, header.Value.Count);
Assert.Equal(expectedValue, header.Value.ToString());
}
Assert.Equal(1, iterations);
}
[Fact]
public void NonValidated_SetMultipleHeaders_AllHeaderValuesReturned()
{
MockHeaderParser parser = new MockHeaderParser(true);
MockHeaders headers = new MockHeaders(parser);
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.Add("header2", "value2");
headers.Add("header3", (string)null);
headers.Add("header4", "value41");
headers.Add("header4", "value42");
string[] expectedHeaderNames = { headers.Descriptor.Name, "header2", "header3", "header4" };
string[] expectedHeaderValues = { parsedPrefix + "1", "value2", "", "value41, value42" };
int i = 0;
foreach (KeyValuePair<string, HeaderStringValues> header in headers.NonValidated)
{
Assert.NotEqual(expectedHeaderNames.Length, i);
Assert.Equal(expectedHeaderNames[i], header.Key);
Assert.Equal(expectedHeaderValues[i], header.Value.ToString());
i++;
}
}
[Fact]
public void NonValidated_SetMultipleValuesOnSingleValueHeader_AllHeaderValuesReturned()
{
MockHeaderParser parser = new MockHeaderParser(false);
MockHeaders headers = new MockHeaders(parser);
headers.TryAddWithoutValidation(headers.Descriptor, "value1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
foreach (KeyValuePair<string, HeaderStringValues> header in headers.NonValidated)
{
Assert.Equal(headers.Descriptor.Name, header.Key);
// Note that the added rawPrefix did not get parsed
Assert.Equal("value1, " + rawPrefix, header.Value.ToString());
}
}
[Fact]
public void NonValidated_ValidAndInvalidValues_DictionaryMembersWork()
{
var headers = new HttpResponseHeaders();
IReadOnlyDictionary<string, HeaderStringValues> nonValidated = headers.NonValidated;
Assert.True(headers.TryAddWithoutValidation("Location", "http:/invalidLocation"));
Assert.True(headers.TryAddWithoutValidation("Location", "http:/anotherLocation"));
Assert.True(headers.TryAddWithoutValidation("Date", "not a date"));
Assert.Equal(2, nonValidated.Count);
Assert.True(nonValidated.ContainsKey("Location"));
Assert.True(nonValidated.ContainsKey("Date"));
Assert.False(nonValidated.ContainsKey("Age"));
Assert.False(nonValidated.TryGetValue("Age", out _));
Assert.Throws<KeyNotFoundException>(() => nonValidated["Age"]);
Assert.True(nonValidated.TryGetValue("Location", out HeaderStringValues locations));
Assert.Equal(2, locations.Count);
Assert.Equal(new[] { "http:/invalidLocation", "http:/anotherLocation" }, locations.ToArray());
Assert.Equal("http:/invalidLocation, http:/anotherLocation", locations.ToString());
Assert.True(nonValidated.TryGetValue("Date", out HeaderStringValues dates));
Assert.Equal(1, dates.Count);
Assert.Equal(new[] { "not a date" }, dates.ToArray());
Assert.Equal("not a date", dates.ToString());
dates = nonValidated["Date"];
Assert.Equal(1, dates.Count);
Assert.Equal(new[] { "not a date" }, dates.ToArray());
Assert.Equal("not a date", dates.ToString());
Assert.Equal(new HashSet<string> { "Location", "Date" }, nonValidated.Keys.ToHashSet());
}
[Fact]
public void NonValidated_ValidInvalidAndRaw_AllReturned()
{
var headers = new HttpResponseHeaders();
IReadOnlyDictionary<string, HeaderStringValues> nonValidated = headers.NonValidated;
// Parsed value
headers.Date = new DateTimeOffset(1, 2, 3, 4, 5, 6, TimeSpan.Zero);
// Invalid value
headers.TryAddWithoutValidation("Date", "not a date");
foreach (KeyValuePair<string, IEnumerable<string>> _ in headers) { }
// Raw value
headers.TryAddWithoutValidation("Date", "another not a date");
// All three show up
Assert.Equal(1, nonValidated.Count);
Assert.Equal(3, nonValidated["Date"].Count);
using (new ThreadCultureChange(new CultureInfo("en-US")))
{
Assert.Equal(new HashSet<string> { "not a date", "another not a date", "Sat, 03 Feb 0001 04:05:06 GMT" }, nonValidated["Date"].ToHashSet());
}
}
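        // Illustrative sketch (editor's addition): NonValidated is the intended way to read headers
        // (e.g. for logging) without triggering parsing or validation, which is why the tests above can
        // observe raw and invalid values through it. The "Date" value below is deliberately malformed.
        [Fact]
        public void Sketch_NonValidated_ExposesRawValuesWithoutParsing()
        {
            HttpResponseHeaders headers = new HttpResponseMessage().Headers;
            headers.TryAddWithoutValidation("Date", "not a date");
            // The raw value is visible and is not removed by reading it through NonValidated.
            Assert.Equal(1, headers.NonValidated.Count);
            Assert.Equal("not a date", headers.NonValidated["Date"].ToString());
        }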
[Theory]
[InlineData(null)]
[InlineData("")]
public void Contains_UseEmptyHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
AssertExtensions.Throws<ArgumentException>("name", () => { headers.Contains(headerName); });
}
[Theory]
[MemberData(nameof(GetInvalidHeaderNames))]
public void Contains_UseInvalidHeaderName_Throw(string headerName)
{
MockHeaders headers = new MockHeaders();
Assert.Throws<FormatException>(() => { headers.Contains(headerName); });
}
[Fact]
public void Contains_CallContainsFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.Contains("doesntexist"));
}
[Fact]
public void Contains_CallContainsForNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix);
Assert.False(headers.Contains("doesntexist"));
}
[Fact]
public void Contains_CallContainsForEmptyHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, string.Empty);
Assert.False(headers.Contains(headers.Descriptor));
}
[Fact]
public void Contains_CallContainsForExistingHeader_ReturnsTrue()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("custom3", "customValue3");
headers.Add("custom4", "customValue4");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
// Nothing got parsed so far since we just added custom headers and for the known header we called
// TryAddWithoutValidation().
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.True(headers.Contains(headers.Descriptor));
// Contains() should trigger parsing of values added with TryAddWithoutValidation(): If the value was invalid,
// i.e. contains newline chars, then the header will be removed from the collection.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void Contains_AddValuesWithNewlineChars_HeadersGetRemovedWhenCallingContains()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
headers.TryAddWithoutValidation("custom", "invalid\r\nvalue");
Assert.False(headers.Contains(headers.Descriptor), "Store should not have an entry for 'knownHeader'.");
Assert.False(headers.Contains("custom"), "Store should not have an entry for 'custom'.");
}
[Fact]
public void GetEnumerator_GetEnumeratorFromUninitializedHeaderStore_ReturnsEmptyEnumerator()
{
MockHeaders headers = new MockHeaders();
IEnumerator<KeyValuePair<string, IEnumerable<string>>> enumerator = headers.GetEnumerator();
Assert.False(enumerator.MoveNext());
}
[Fact]
public void GetEnumerator_FirstHeaderWithOneValueSecondHeaderWithTwoValues_EnumeratorReturnsTwoHeaders()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeaderName, "custom0");
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
// The value added with TryAddWithoutValidation() wasn't parsed yet.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
IEnumerator<KeyValuePair<string, IEnumerable<string>>> enumerator = headers.GetEnumerator();
// Getting the enumerator doesn't trigger parsing.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.True(enumerator.MoveNext());
Assert.Equal(customHeaderName, enumerator.Current.Key);
Assert.Equal(1, enumerator.Current.Value.Count());
Assert.Equal("custom0", enumerator.Current.Value.ElementAt(0));
// Starting to use the enumerator will trigger parsing of raw values. The first header is not a known
// header, so there shouldn't be any parsing.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
Assert.True(enumerator.MoveNext());
Assert.Equal(headers.Descriptor.Name, enumerator.Current.Key);
Assert.Equal(2, enumerator.Current.Value.Count());
Assert.Equal(parsedPrefix + "1", enumerator.Current.Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", enumerator.Current.Value.ElementAt(1));
// The second header is a known header, so parsing raw values should get executed.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.False(enumerator.MoveNext(), "Only 2 values expected, but enumerator returns a third one.");
}
[Fact]
public void GetEnumerator_FirstCustomHeaderWithEmptyValueSecondKnownHeaderWithEmptyValue_EnumeratorReturnsOneHeader()
{
MockHeaders headers = new MockHeaders();
headers.Add(customHeaderName, string.Empty);
headers.Add(headers.Descriptor, string.Empty);
IEnumerator<KeyValuePair<string, IEnumerable<string>>> enumerator = headers.GetEnumerator();
Assert.True(enumerator.MoveNext());
Assert.Equal(customHeaderName, enumerator.Current.Key);
Assert.Equal(1, enumerator.Current.Value.Count());
Assert.Equal(string.Empty, enumerator.Current.Value.ElementAt(0));
Assert.False(enumerator.MoveNext(), "Only the (empty) custom value should be returned.");
}
[Fact]
public void GetEnumerator_UseExplicitInterfaceImplementation_EnumeratorReturnsNoOfHeaders()
{
MockHeaders headers = new MockHeaders();
headers.Add("custom1", "customValue1");
headers.Add("custom2", "customValue2");
headers.Add("custom3", "customValue3");
headers.Add("custom4", "customValue4");
System.Collections.IEnumerable headersAsIEnumerable = headers;
IEnumerator enumerator = headersAsIEnumerable.GetEnumerator();
KeyValuePair<string, IEnumerable<string>> currentValue;
for (int i = 1; i <= 4; i++)
{
Assert.True(enumerator.MoveNext());
currentValue = (KeyValuePair<string, IEnumerable<string>>)enumerator.Current;
Assert.Equal("custom" + i, currentValue.Key);
Assert.Equal(1, currentValue.Value.Count());
}
Assert.False(enumerator.MoveNext(), "Only 2 values expected, but enumerator returns a third one.");
}
[Fact]
public void GetEnumerator_InvalidValueBetweenValidHeaders_EnumeratorReturnsAllValidValuesAndRemovesInvalidValue()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation("foo", "fooValue");
headers.TryAddWithoutValidation("invalid", "invalid\nvalue");
headers.TryAddWithoutValidation("bar", "barValue");
Assert.Equal(3, headers.Count);
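// Materializing the dictionary below parses all pending values and removes the header whose value contains a newline.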
IDictionary<string, IEnumerable<string>> dict = headers.ToDictionary(pair => pair.Key, pair => pair.Value);
Assert.Equal("fooValue", Assert.Single(Assert.Contains("foo", dict)));
Assert.Equal("barValue", Assert.Single(Assert.Contains("bar", dict)));
Assert.Equal(2, headers.Count);
Assert.False(headers.NonValidated.Contains("invalid"));
}
[Fact]
public void AddParsedValue_AddSingleValueToNonExistingHeader_HeaderGetsCreatedAndValueAdded()
{
Uri headerValue = new Uri("http://example.org/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(customTypeHeader, headerValue);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(headerValue.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void AddParsedValue_AddValueTypeValueToNonExistingHeader_HeaderGetsCreatedAndBoxedValueAdded()
{
int headerValue = 5;
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(customTypeHeader, headerValue);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(headerValue.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void AddParsedValue_AddTwoValuesToNonExistingHeader_HeaderGetsCreatedAndValuesAdded()
{
Uri headerValue1 = new Uri("http://example.org/1/");
Uri headerValue2 = new Uri("http://example.org/2/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(customTypeHeader, headerValue1);
// Adding a second value will cause a List<T> to be created in order to store values. If we just add
// one value, no List<T> is created, but the header is just added as store value.
headers.AddParsedValue(customTypeHeader, headerValue2);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(2, headers.GetValues(customTypeHeader).Count());
Assert.Equal(headerValue1.ToString(), headers.First().Value.ElementAt(0));
Assert.Equal(headerValue2.ToString(), headers.First().Value.ElementAt(1));
}
[Fact]
public void AddParsedValue_UseDifferentAddMethods_AllValuesAddedCorrectly()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
headers.AddParsedValue(headers.Descriptor, parsedPrefix + "3");
// Adding a parsed value, will trigger all raw values to be parsed.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(3, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "1", headers.First().Value.ElementAt(0));
Assert.Equal(parsedPrefix + "2", headers.First().Value.ElementAt(1));
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(2));
}
[Fact]
public void AddParsedValue_FirstAddNewlineCharsValueThenCallAddParsedValue_ParsedValueAdded()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
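// Adding a parsed value forces the pending raw value to be processed; because it contains newline characters it is dropped.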
headers.AddParsedValue(headers.Descriptor, parsedPrefix + "1");
Assert.True(headers.Contains(headers.Descriptor), "Store should have an entry for 'knownHeader'.");
Assert.Equal(1, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "1", headers.GetValues(headers.Descriptor).First());
}
[Fact]
public void AddParsedValue_FirstAddNewlineCharsValueThenAddValidValueThenCallAddParsedValue_ParsedValueAdded()
{
MockHeaders headers = new MockHeaders();
// Add header value with newline chars.
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue + "\r\ninvalid");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "0");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
headers.AddParsedValue(headers.Descriptor, parsedPrefix + "1");
Assert.True(headers.Contains(headers.Descriptor), "Store should have an entry for 'knownHeader'.");
Assert.Equal(2, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "0", headers.GetValues(headers.Descriptor).ElementAt(0));
Assert.Equal(parsedPrefix + "1", headers.GetValues(headers.Descriptor).ElementAt(1));
}
[Fact]
public void SetParsedValue_AddSingleValueToNonExistingHeader_HeaderGetsCreatedAndValueAdded()
{
Uri headerValue = new Uri("http://example.org/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.SetParsedValue(customTypeHeader, headerValue);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(headerValue.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void SetParsedValue_SetTwoValuesToNonExistingHeader_HeaderGetsCreatedAndLastValueAdded()
{
Uri headerValue1 = new Uri("http://example.org/1/");
Uri headerValue2 = new Uri("http://example.org/2/");
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.SetParsedValue(customTypeHeader, headerValue1);
// The following line will remove the previously added values and replace them with the provided value.
headers.SetParsedValue(customTypeHeader, headerValue2);
Assert.True(headers.Contains(customTypeHeader), "Store doesn't have the header after adding a value to it.");
Assert.Equal(1, headers.GetValues(customTypeHeader).Count());
// The second value replaces the first value.
Assert.Equal(headerValue2.ToString(), headers.First().Value.ElementAt(0));
}
[Fact]
public void SetParsedValue_SetValueAfterAddingMultipleValues_SetValueReplacesOtherValues()
{
MockHeaders headers = new MockHeaders();
headers.Add(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
headers.SetParsedValue(headers.Descriptor, parsedPrefix + "3");
// Adding a parsed value, will trigger all raw values to be parsed.
Assert.Equal(2, headers.Parser.TryParseValueCallCount);
Assert.Equal(1, headers.GetValues(headers.Descriptor).Count());
Assert.Equal(parsedPrefix + "3", headers.First().Value.ElementAt(0));
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueFromUninitializedHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
Assert.False(headers.ContainsParsedValue(customHeader, "custom1"));
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForNonExistingHeader_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.False(headers.ContainsParsedValue(customHeader, "custom1"));
// ContainsParsedValue() must not trigger raw value parsing for headers other than the requested one.
// In this case we expect ContainsParsedValue(customHeader) not to trigger raw value parsing for
// 'headers.Descriptor'.
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForNonExistingHeaderValue_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "value1");
headers.AddParsedValue(headers.Descriptor, "value2");
// After adding two values to header 'knownHeader' we ask for a non-existing value.
Assert.False(headers.ContainsParsedValue(headers.Descriptor, "doesntexist"));
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForExistingHeaderButNonAvailableValue_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix);
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.False(headers.ContainsParsedValue(headers.Descriptor, "custom1"));
// ContainsParsedValue() must trigger raw value parsing for the header it was asked for.
Assert.Equal(1, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void ContainsParsedValue_ContainsParsedValueForExistingHeaderWithAvailableValue_ReturnsTrue()
{
MockHeaders headers = new MockHeaders();
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "1");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "2");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "3");
headers.TryAddWithoutValidation(headers.Descriptor, rawPrefix + "4");
Assert.Equal(0, headers.Parser.TryParseValueCallCount);
Assert.True(headers.ContainsParsedValue(headers.Descriptor, parsedPrefix + "3"));
// ContainsParsedValue() must trigger raw value parsing for the header it was asked for.
Assert.Equal(4, headers.Parser.TryParseValueCallCount);
}
[Fact]
public void ContainsParsedValue_AddOneValueToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
Assert.True(headers.ContainsParsedValue(headers.Descriptor, "VALUE"));
Assert.Equal(1, headers.Parser.MockComparer.EqualsCount);
headers.Clear();
headers.TryAddWithoutValidation(headers.Descriptor, invalidHeaderValue);
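// The invalid raw value never parses successfully, so it is not reported as a parsed value.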
Assert.False(headers.ContainsParsedValue(headers.Descriptor, invalidHeaderValue));
}
[Fact]
public void ContainsParsedValue_AddTwoValuesToKnownHeaderAndCompareWithValueThatDiffersInCase_CustomComparerUsedForComparison()
{
MockHeaders headers = new MockHeaders();
headers.AddParsedValue(headers.Descriptor, "differentvalue");
headers.AddParsedValue(headers.Descriptor, "value");
// Our custom comparer (MockComparer) does case-insensitive value comparison. Verify that our custom
// comparer is used to compare the header value.
// Note that since we added 2 values a different code path than in the previous test is used. In this
// case we have stored the values as List<string> internally.
Assert.True(headers.ContainsParsedValue(headers.Descriptor, "VALUE"));
Assert.Equal(2, headers.Parser.MockComparer.EqualsCount);
}
[Fact]
public void ContainsParsedValue_ParserWithNoEqualityComparer_CaseSensitiveComparison()
{
CustomTypeHeaders headers = new CustomTypeHeaders();
headers.AddParsedValue(noComparerHeader, "lowercasevalue");
// Since we don't provide a comparer, the default string.Equals() is called which is case-sensitive. So
// the following call should return false.
Assert.False(headers.ContainsParsedValue(noComparerHeader, "LOWERCASEVALUE"));
// Now we try to use the correct casing. This should return true.
Assert.True(headers.ContainsParsedValue(noComparerHeader, "lowercasevalue"));
}
[Fact]
public void ContainsParsedValue_CallFromEmptyHeaderStore_ReturnsFalse()
{
MockHeaders headers = new MockHeaders();
// This will create a header entry with no value.
headers.Add(headers.Descriptor, string.Empty);
Assert.False(headers.Contains(headers.Descriptor), "Expected known header to be in the store.");
// This will just return fals and not touch the header.
Assert.False(headers.ContainsParsedValue(headers.Descriptor, "x"),
"Expected 'ContainsParsedValue' to return false.");
}
[Fact]
public void AddHeaders_SourceAndDestinationStoreHaveMultipleHeaders_OnlyHeadersNotInDestinationAreCopiedFromSource()
{
// Add header values to the source store.
MockHeaders source = new MockHeaders();
source.Add("custom1", "source10");
source.Add("custom1", "source11");
source.TryAddWithoutValidation("custom2", "source2");
source.Add(known1Header, rawPrefix + "3");
source.TryAddWithoutValidation(known1Header, rawPrefix + "4");
source.TryAddWithoutValidation(known2Header, rawPrefix + "5");
source.TryAddWithoutValidation(known2Header, invalidHeaderValue);
source.TryAddWithoutValidation(known2Header, rawPrefix + "7");
// This header value gets removed when it gets parsed.
source.TryAddWithoutValidation(known3Header, (string)null);
source.Add(known3Header, string.Empty);
DateTimeOffset known4Value1 = new DateTimeOffset(2010, 6, 15, 18, 31, 34, TimeSpan.Zero);
DateTimeOffset known4Value2 = new DateTimeOffset(2010, 4, 8, 11, 21, 04, TimeSpan.Zero);
source.AddParsedValue(known4Header, known4Value1);
source.AddParsedValue(known4Header, known4Value2);
source.Add("custom5", "source5");
source.TryAddWithoutValidation("custom6", (string)null);
// This header value gets added even though it doesn't have values. But since this is a custom header we
// assume it supports empty values.
source.TryAddWithoutValidation("custom7", (string)null);
source.Add("custom7", string.Empty);
// Add header values to the destination store.
MockHeaders destination = new MockHeaders();
destination.Add("custom2", "destination1");
destination.Add(known1Header, rawPrefix + "9");
// Now add all headers that are in source but not destination to destination.
destination.AddHeaders(source);
Assert.Equal(8, destination.Count());
Assert.Equal(2, destination.GetValues("custom1").Count());
Assert.Equal("source10", destination.GetValues("custom1").ElementAt(0));
Assert.Equal("source11", destination.GetValues("custom1").ElementAt(1));
// This value was set in destination. The header in source was ignored.
Assert.Equal(1, destination.GetValues("custom2").Count());
Assert.Equal("destination1", destination.GetValues("custom2").First());
// This value was set in destination. The header in source was ignored.
Assert.Equal(1, destination.GetValues(known1Header).Count());
Assert.Equal(parsedPrefix + "9", destination.GetValues(known1Header).First());
// The header in source gets first parsed and then copied to destination. Note that here we have one
// invalid value.
Assert.Equal(3, destination.GetValues(known2Header).Count());
Assert.Equal(parsedPrefix + "5", destination.GetValues(known2Header).ElementAt(0));
Assert.Equal(parsedPrefix + "7", destination.GetValues(known2Header).ElementAt(1));
Assert.Equal(invalidHeaderValue, destination.GetValues(known2Header).ElementAt(2));
// Header 'known3' should not be copied, since it doesn't contain any values.
Assert.False(destination.Contains(known3Header), "'known3' header value count.");
Assert.Equal(2, destination.GetValues(known4Header).Count());
Assert.Equal(known4Value1.ToString(), destination.GetValues(known4Header).ElementAt(0));
Assert.Equal(known4Value2.ToString(), destination.GetValues(known4Header).ElementAt(1));
Assert.Equal("source5", destination.GetValues("custom5").First());
Assert.Equal(string.Empty, destination.GetValues("custom6").First());
// Unlike 'known3', 'custom7' was added even though it only had empty values. The reason is that 'custom7'
// is a custom header so we just add whatever value we get passed in.
Assert.Equal(2, destination.GetValues("custom7").Count());
Assert.Equal("", destination.GetValues("custom7").ElementAt(0));
Assert.Equal("", destination.GetValues("custom7").ElementAt(1));
}
[Fact]
public void AddHeaders_SourceHasEmptyHeaderStore_DestinationRemainsUnchanged()
{
MockHeaders source = new MockHeaders();
MockHeaders destination = new MockHeaders();
destination.Add(known1Header, rawPrefix);
destination.AddHeaders(source);
Assert.Equal(1, destination.Count());
}
[Fact]
public void AddHeaders_DestinationHasEmptyHeaderStore_DestinationHeaderStoreGetsCreatedAndValuesAdded()
{
MockHeaders source = new MockHeaders();
source.Add(known1Header, rawPrefix);
MockHeaders destination = new MockHeaders();
destination.AddHeaders(source);
Assert.Equal(1, destination.Count());
}
[Fact]
public void AddHeaders_SourceHasInvalidHeaderValues_InvalidHeadersRemovedFromSourceAndNotCopiedToDestination()
{
MockHeaders source = new MockHeaders();
source.TryAddWithoutValidation(known1Header, invalidHeaderValue + "\r\ninvalid");
source.TryAddWithoutValidation("custom", "invalid\r\nvalue");
MockHeaders destination = new MockHeaders();
destination.AddHeaders(source);
Assert.Equal(0, source.Count());
Assert.False(source.Contains(known1Header), "source contains 'known' header.");
Assert.False(source.Contains("custom"), "source contains 'custom' header.");
Assert.Equal(0, destination.Count());
Assert.False(destination.Contains(known1Header), "destination contains 'known' header.");
Assert.False(destination.Contains("custom"), "destination contains 'custom' header.");
}
[Fact]
public void AddHeaders_ResponseHeaderToRequestHeaders_Success()
{
const string Name = "WWW-Authenticate";
const string Value = "Basic realm=\"Access to the staging site\", charset=\"UTF-8\"";
var request = new HttpRequestMessage();
Assert.True(request.Headers.TryAddWithoutValidation(Name, Value));
Assert.True(request.Headers.Contains(Name));
Assert.True(request.Headers.NonValidated.Contains(Name));
Assert.True(request.Headers.TryGetValues(Name, out IEnumerable<string> values));
Assert.Equal(Value, values.Single());
Assert.True(request.Headers.NonValidated.TryGetValues(Name, out HeaderStringValues nvValues));
Assert.Equal(Value, nvValues.Single());
}
[Fact]
public void AddHeaders_RequestHeaderToResponseHeaders_Success()
{
const string Name = "Referer";
const string Value = "https://dot.net";
var response = new HttpResponseMessage();
Assert.True(response.Headers.TryAddWithoutValidation(Name, Value));
Assert.True(response.Headers.Contains(Name));
Assert.True(response.Headers.NonValidated.Contains(Name));
Assert.True(response.Headers.TryGetValues(Name, out IEnumerable<string> values));
Assert.Equal(Value, values.Single());
Assert.True(response.Headers.NonValidated.TryGetValues(Name, out HeaderStringValues nvValues));
Assert.Equal(Value, nvValues.Single());
}
[Fact]
public void HeaderStringValues_Default_Empty()
{
HeaderStringValues v = default;
Assert.Equal(0, v.Count);
Assert.Empty(v);
Assert.Equal(string.Empty, v.ToString());
}
[Fact]
public void HeaderStringValues_Constructed_ProducesExpectedResults()
{
// 0 strings
foreach (HeaderStringValues hsv in new[] { new HeaderStringValues(KnownHeaders.Accept.Descriptor, Array.Empty<string>()) })
{
Assert.Equal(0, hsv.Count);
HeaderStringValues.Enumerator e = hsv.GetEnumerator();
Assert.False(e.MoveNext());
Assert.Equal(string.Empty, hsv.ToString());
}
// 1 string
foreach (HeaderStringValues hsv in new[] { new HeaderStringValues(KnownHeaders.Accept.Descriptor, "hello"), new HeaderStringValues(KnownHeaders.Accept.Descriptor, new[] { "hello" }) })
{
Assert.Equal(1, hsv.Count);
HeaderStringValues.Enumerator e = hsv.GetEnumerator();
Assert.True(e.MoveNext());
Assert.Equal("hello", e.Current);
Assert.False(e.MoveNext());
Assert.Equal("hello", hsv.ToString());
}
// 2 strings
foreach (HeaderStringValues hsv in new[] { new HeaderStringValues(KnownHeaders.Accept.Descriptor, new[] { "hello", "world" }) })
{
Assert.Equal(2, hsv.Count);
HeaderStringValues.Enumerator e = hsv.GetEnumerator();
Assert.True(e.MoveNext());
Assert.Equal("hello", e.Current);
Assert.True(e.MoveNext());
Assert.Equal("world", e.Current);
Assert.False(e.MoveNext());
Assert.Equal("hello, world", hsv.ToString());
}
}
[Theory]
[MemberData(nameof(NumberOfHeadersUpToArrayThreshold_AddNonValidated_EnumerateNonValidated))]
public void Add_WithinArrayThresholdHeaders_EnumerationPreservesOrdering(int numberOfHeaders, bool addNonValidated, bool enumerateNonValidated)
{
var headers = new MockHeaders();
for (int i = 0; i < numberOfHeaders; i++)
{
if (addNonValidated)
{
headers.TryAddWithoutValidation(i.ToString(), i.ToString());
}
else
{
headers.Add(i.ToString(), i.ToString());
}
}
KeyValuePair<string, string>[] entries = enumerateNonValidated
? headers.NonValidated.Select(pair => KeyValuePair.Create(pair.Key, Assert.Single(pair.Value))).ToArray()
: headers.Select(pair => KeyValuePair.Create(pair.Key, Assert.Single(pair.Value))).ToArray();
Assert.Equal(numberOfHeaders, entries.Length);
for (int i = 0; i < numberOfHeaders; i++)
{
Assert.Equal(i.ToString(), entries[i].Key);
Assert.Equal(i.ToString(), entries[i].Value);
}
}
[Fact]
public void Add_Remove_HeaderOrderingIsPreserved()
{
var headers = new MockHeaders();
headers.Add("a", "");
headers.Add("b", "");
headers.Add("c", "");
headers.Remove("b");
Assert.Equal(new[] { "a", "c" }, headers.Select(pair => pair.Key));
}
[Fact]
public void Add_AddToExistingKey_OriginalOrderingIsPreserved()
{
var headers = new MockHeaders();
headers.Add("a", "a1");
headers.Add("b", "b1");
headers.Add("a", "a2");
Assert.Equal(new[] { "a", "b" }, headers.Select(pair => pair.Key));
}
[Theory]
[InlineData(3)]
[InlineData(4)]
[InlineData(5)]
[InlineData(HttpHeaders.ArrayThreshold / 4)]
[InlineData(HttpHeaders.ArrayThreshold / 2)]
[InlineData(HttpHeaders.ArrayThreshold - 1)]
[InlineData(HttpHeaders.ArrayThreshold)]
[InlineData(HttpHeaders.ArrayThreshold + 1)]
[InlineData(HttpHeaders.ArrayThreshold * 2)]
[InlineData(HttpHeaders.ArrayThreshold * 4)]
public void Add_LargeNumberOfHeaders_OperationsStillSupported(int numberOfHeaders)
{
string[] keys = Enumerable.Range(1, numberOfHeaders).Select(i => i.ToString()).ToArray();
var headers = new MockHeaders();
foreach (string key in keys)
{
Assert.False(headers.NonValidated.Contains(key));
headers.TryAddWithoutValidation(key, key);
Assert.True(headers.NonValidated.Contains(key));
}
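// Beyond HttpHeaders.ArrayThreshold entries ordering is no longer guaranteed (the store presumably switches away from its ordered array layout), which the sorted comparisons below account for.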
string[] nonValidatedKeys = headers.NonValidated.Select(pair => pair.Key).ToArray();
Assert.Equal(numberOfHeaders, nonValidatedKeys.Length);
string[] newKeys = headers.Select(pair => pair.Key).ToArray();
Assert.Equal(numberOfHeaders, newKeys.Length);
string[] nonValidatedKeysAfterValidation = headers.NonValidated.Select(pair => pair.Key).ToArray();
Assert.Equal(numberOfHeaders, nonValidatedKeysAfterValidation.Length);
if (numberOfHeaders > HttpHeaders.ArrayThreshold)
{
// Ordering is lost when adding more than ArrayThreshold headers
Array.Sort(nonValidatedKeys, (a, b) => int.Parse(a).CompareTo(int.Parse(b)));
Array.Sort(newKeys, (a, b) => int.Parse(a).CompareTo(int.Parse(b)));
Array.Sort(nonValidatedKeysAfterValidation, (a, b) => int.Parse(a).CompareTo(int.Parse(b)));
}
Assert.Equal(keys, nonValidatedKeys);
Assert.Equal(keys, newKeys);
Assert.Equal(keys, nonValidatedKeysAfterValidation);
headers.Add("3", "secondValue");
Assert.True(headers.TryGetValues("3", out IEnumerable<string> valuesFor3));
Assert.Equal(new[] { "3", "secondValue" }, valuesFor3);
Assert.True(headers.TryAddWithoutValidation("invalid", "invalid\nvalue"));
Assert.True(headers.TryAddWithoutValidation("valid", "validValue"));
Assert.Equal(numberOfHeaders + 2, headers.NonValidated.Count);
// Remove all headers except for "1", "valid", "invalid"
for (int i = 2; i <= numberOfHeaders; i++)
{
Assert.True(headers.Remove(i.ToString()));
}
Assert.False(headers.Remove("3"));
// "1", "invalid", "valid"
Assert.True(headers.NonValidated.Contains("invalid"));
Assert.Equal(3, headers.NonValidated.Count);
Assert.Equal(new[] { "1", "valid" }, headers.Select(pair => pair.Key).OrderBy(i => i));
Assert.Equal(2, headers.NonValidated.Count);
headers.Clear();
Assert.Equal(0, headers.NonValidated.Count);
Assert.Empty(headers);
Assert.False(headers.Contains("3"));
Assert.True(headers.TryAddWithoutValidation("3", "newValue"));
Assert.True(headers.TryGetValues("3", out valuesFor3));
Assert.Equal(new[] { "newValue" }, valuesFor3);
}
[Fact]
public void TryAddInvalidHeader_ShouldThrowFormatException()
{
MockHeaders headers = new MockHeaders();
AssertExtensions.ThrowsContains<FormatException>(() => headers.Remove("\u0080"), "\u0080");
}
public static IEnumerable<object[]> NumberOfHeadersUpToArrayThreshold_AddNonValidated_EnumerateNonValidated()
{
for (int i = 0; i <= HttpHeaders.ArrayThreshold; i++)
{
yield return new object[] { i, false, false };
yield return new object[] { i, false, true };
yield return new object[] { i, true, false };
yield return new object[] { i, true, true };
}
}
public static IEnumerable<object[]> GetInvalidHeaderNames()
{
yield return new object[] { "invalid header" };
yield return new object[] { "invalid\theader" };
yield return new object[] { "invalid\rheader" };
yield return new object[] { "invalid\nheader" };
yield return new object[] { "invalid(header" };
yield return new object[] { "invalid)header" };
yield return new object[] { "invalid<header" };
yield return new object[] { "invalid>header" };
yield return new object[] { "invalid@header" };
yield return new object[] { "invalid,header" };
yield return new object[] { "invalid;header" };
yield return new object[] { "invalid:header" };
yield return new object[] { "invalid\\header" };
yield return new object[] { "invalid\"header" };
yield return new object[] { "invalid/header" };
yield return new object[] { "invalid[header" };
yield return new object[] { "invalid]header" };
yield return new object[] { "invalid?header" };
yield return new object[] { "invalid=header" };
yield return new object[] { "invalid{header" };
yield return new object[] { "invalid}header" };
}
public static IEnumerable<object[]> HeaderValuesWithNewLines()
{
foreach (string pattern in new[] { "*", "*foo", "* foo", "foo*", "foo* ", "foo*bar", "foo* bar" })
foreach (string newLine in new[] { "\r", "\n", "\r\n" })
foreach (string prefix in new[] { "", "valid, " })
{
yield return new object[] { prefix + pattern.Replace("*", newLine) };
}
}
#region Helper methods
private class MockHeaders : HttpHeaders
{
private MockHeaderParser _parser;
private HeaderDescriptor _descriptor;
public MockHeaderParser Parser => _parser;
public HeaderDescriptor Descriptor => _descriptor;
public MockHeaders(MockHeaderParser parser)
: base()
{
_parser = parser;
_descriptor = (new KnownHeader("known", HttpHeaderType.General, parser)).Descriptor;
}
public MockHeaders()
: this(new MockHeaderParser())
{
}
}
private class MockHeaderParser : HttpHeaderParser
{
public int TryParseValueCallCount { get; set; }
public int EmptyValueCount { get; private set; }
public MockComparer MockComparer { get; private set; }
public MockHeaderParser()
: this(true)
{
}
public MockHeaderParser(bool supportsMultipleValues)
: base(supportsMultipleValues)
{
this.MockComparer = new MockComparer();
}
public MockHeaderParser(string separator)
: base(true, separator)
{
this.MockComparer = new MockComparer();
}
#region IHeaderParser Members
public override IEqualityComparer Comparer
{
get { return MockComparer; }
}
public override bool TryParseValue(string value, object storeValue, ref int index, out object parsedValue)
{
TryParseValueCallCount++;
return TryParseValueCore(value, ref index, out parsedValue);
}
private bool TryParseValueCore(string value, ref int index, out object parsedValue)
{
parsedValue = null;
if (value == null)
{
parsedValue = null;
return true;
}
if (value == string.Empty)
{
EmptyValueCount++;
parsedValue = null;
return true;
}
int separatorIndex = value.IndexOf(',', index);
// Just fail if we don't support multiple values and the value is actually a list of values.
if ((!SupportsMultipleValues) && (separatorIndex >= 0))
{
return false;
}
if (separatorIndex == -1)
{
// If the raw string just contains one value, then use the whole string.
separatorIndex = value.Length;
}
string tempValue = value.Substring(index, separatorIndex - index);
if (tempValue.StartsWith(rawPrefix, StringComparison.Ordinal))
{
index = Math.Min(separatorIndex + 1, value.Length);
// We "parse" the value by replacing 'rawPrefix' strings with 'parsedPrefix' string.
parsedValue = parsedPrefix + tempValue.Substring(rawPrefix.Length,
tempValue.Length - rawPrefix.Length);
return true;
}
// Only thing left is a deliberately chosen invalid value.
Assert.StartsWith(invalidHeaderValue, tempValue, StringComparison.Ordinal);
return false;
}
#endregion
}
private class MockComparer : IEqualityComparer
{
public int GetHashCodeCount { get; private set; }
public int EqualsCount { get; private set; }
#region IEqualityComparer Members
public new bool Equals(object x, object y)
{
Assert.NotNull(x);
Assert.NotNull(y);
EqualsCount++;
string xs = x as string;
string ys = y as string;
if ((xs != null) && (ys != null))
{
return string.Equals(xs, ys, StringComparison.OrdinalIgnoreCase);
}
return x.Equals(y);
}
public int GetHashCode(object obj)
{
GetHashCodeCount++;
return obj.GetHashCode();
}
#endregion
}
private class CustomTypeHeaders : HttpHeaders
{
public CustomTypeHeaders()
{
}
}
private class CustomTypeHeaderParser : HttpHeaderParser
{
private static CustomTypeComparer comparer = new CustomTypeComparer();
public override IEqualityComparer Comparer
{
get { return comparer; }
}
public CustomTypeHeaderParser()
: base(true)
{
}
public override bool TryParseValue(string value, object storeValue, ref int index, out object parsedValue)
{
throw new NotImplementedException();
}
}
private class CustomTypeComparer : IEqualityComparer
{
#region IEqualityComparer Members
public new bool Equals(object x, object y)
{
Assert.NotNull(x);
Assert.NotNull(y);
return x.Equals(y);
}
public int GetHashCode(object obj)
{
Assert.NotNull(obj);
return obj.GetHashCode();
}
#endregion
}
private class NoComparerHeaderParser : HttpHeaderParser
{
public NoComparerHeaderParser()
: base(true)
{
}
public override bool TryParseValue(string value, object storeValue, ref int index, out object parsedValue)
{
throw new NotImplementedException();
}
}
#endregion
}
}
| 1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Net.WebHeaderCollection/src/Resources/Strings.resx | <?xml version="1.0" encoding="utf-8"?>
<root>
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="net_headers_req" xml:space="preserve">
<value>This collection holds response headers and cannot contain the specified request header.</value>
</data>
<data name="net_headers_rsp" xml:space="preserve">
<value>This collection holds request headers and cannot contain the specified response header.</value>
</data>
<data name="net_WebHeaderInvalidControlChars" xml:space="preserve">
<value>Specified value has invalid Control characters.</value>
</data>
<data name="net_WebHeaderInvalidCRLFChars" xml:space="preserve">
<value>Specified value has invalid CRLF characters.</value>
</data>
<data name="net_WebHeaderInvalidHeaderChars" xml:space="preserve">
<value>Specified value has invalid HTTP Header characters.</value>
</data>
<data name="net_WebHeaderMissingColon" xml:space="preserve">
<value>Specified value does not have a ':' separator.</value>
</data>
</root>
| <?xml version="1.0" encoding="utf-8"?>
<root>
<xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
<xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
<xsd:element name="root" msdata:IsDataSet="true">
<xsd:complexType>
<xsd:choice maxOccurs="unbounded">
<xsd:element name="metadata">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" />
</xsd:sequence>
<xsd:attribute name="name" use="required" type="xsd:string" />
<xsd:attribute name="type" type="xsd:string" />
<xsd:attribute name="mimetype" type="xsd:string" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="assembly">
<xsd:complexType>
<xsd:attribute name="alias" type="xsd:string" />
<xsd:attribute name="name" type="xsd:string" />
</xsd:complexType>
</xsd:element>
<xsd:element name="data">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
<xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
<xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
<xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
<xsd:attribute ref="xml:space" />
</xsd:complexType>
</xsd:element>
<xsd:element name="resheader">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required" />
</xsd:complexType>
</xsd:element>
</xsd:choice>
</xsd:complexType>
</xsd:element>
</xsd:schema>
<resheader name="resmimetype">
<value>text/microsoft-resx</value>
</resheader>
<resheader name="version">
<value>2.0</value>
</resheader>
<resheader name="reader">
<value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<resheader name="writer">
<value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
</resheader>
<data name="net_headers_req" xml:space="preserve">
<value>This collection holds response headers and cannot contain the specified request header.</value>
</data>
<data name="net_headers_rsp" xml:space="preserve">
<value>This collection holds request headers and cannot contain the specified response header.</value>
</data>
<data name="net_WebHeaderInvalidControlChars" xml:space="preserve">
<value>Specified value has invalid Control characters.</value>
</data>
<data name="net_WebHeaderInvalidCRLFChars" xml:space="preserve">
<value>Specified value has invalid CRLF characters.</value>
</data>
<data name="net_WebHeaderInvalidHeaderChars" xml:space="preserve">
<value>Specified value '{0}' has invalid HTTP Header characters.</value>
</data>
<data name="net_WebHeaderMissingColon" xml:space="preserve">
<value>Specified value does not have a ':' separator.</value>
</data>
</root>
| 1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Net.WebHeaderCollection/tests/WebHeaderCollectionTest.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Generic;
using Xunit;
namespace System.Net.Tests
{
public partial class WebHeaderCollectionTest
{
[Fact]
public void Ctor_Success()
{
new WebHeaderCollection();
}
[Fact]
public void DefaultPropertyValues_ReturnEmptyAfterConstruction_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Equal(0, w.AllKeys.Length);
Assert.Equal(0, w.Count);
Assert.Equal("\r\n", w.ToString());
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Fact]
public void HttpRequestHeader_Add_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Connection] = "keep-alive";
Assert.Equal(1, w.Count);
Assert.Equal("keep-alive", w[HttpRequestHeader.Connection]);
Assert.Equal("Connection", w.AllKeys[0]);
}
[Theory]
[InlineData((HttpRequestHeader)int.MinValue)]
[InlineData((HttpRequestHeader)(-1))]
[InlineData((HttpRequestHeader)int.MaxValue)]
public void HttpRequestHeader_AddInvalid_Throws(HttpRequestHeader header)
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Throws<IndexOutOfRangeException>(() => w[header] = "foo");
}
[Theory]
[InlineData((HttpResponseHeader)int.MinValue)]
[InlineData((HttpResponseHeader)(-1))]
[InlineData((HttpResponseHeader)int.MaxValue)]
public void HttpResponseHeader_AddInvalid_Throws(HttpResponseHeader header)
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Throws<IndexOutOfRangeException>(() => w[header] = "foo");
}
[Fact]
public void CustomHeader_AddQuery_Success()
{
string customHeader = "Custom-Header";
string customValue = "Custom;.-Value";
WebHeaderCollection w = new WebHeaderCollection();
w[customHeader] = customValue;
Assert.Equal(1, w.Count);
Assert.Equal(customValue, w[customHeader]);
Assert.Equal(customHeader, w.AllKeys[0]);
}
[Fact]
public void HttpResponseHeader_AddQuery_CommonHeader_Success()
{
string headerValue = "value123";
WebHeaderCollection w = new WebHeaderCollection();
w[HttpResponseHeader.ProxyAuthenticate] = headerValue;
w[HttpResponseHeader.WwwAuthenticate] = headerValue;
Assert.Equal(headerValue, w[HttpResponseHeader.ProxyAuthenticate]);
Assert.Equal(headerValue, w[HttpResponseHeader.WwwAuthenticate]);
}
[Fact]
public void HttpRequest_AddQuery_CommonHeader_Success()
{
string headerValue = "value123";
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Accept] = headerValue;
Assert.Equal(headerValue, w[HttpRequestHeader.Accept]);
}
[Fact]
public void RequestThenResponseHeaders_Add_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Accept] = "text/json";
Assert.Throws<InvalidOperationException>(() => w[HttpResponseHeader.ContentLength] = "123");
}
[Fact]
public void ResponseThenRequestHeaders_Add_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpResponseHeader.ContentLength] = "123";
Assert.Throws<InvalidOperationException>(() => w[HttpRequestHeader.Accept] = "text/json");
}
[Fact]
public void ResponseHeader_QueryRequest_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpResponseHeader.ContentLength] = "123";
Assert.Throws<InvalidOperationException>(() => w[HttpRequestHeader.Accept]);
}
[Fact]
public void RequestHeader_QueryResponse_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Accept] = "text/json";
Assert.Throws<InvalidOperationException>(() => w[HttpResponseHeader.ContentLength]);
}
[Fact]
public void Setter_ValidName_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["Accept"] = "text/json";
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Setter_NullOrEmptyName_Throws(string name)
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("name", () => w[name] = "test");
}
public static object[][] InvalidNames = {
new object[] { "(" },
new object[] { "\u1234" },
new object[] { "\u0019" }
};
[Theory, MemberData(nameof(InvalidNames))]
public void Setter_InvalidName_Throws(string name)
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("name", () => w[name] = "test");
}
public static object[][] InvalidValues = {
new object[] { "value1\rvalue2\r" },
new object[] { "value1\nvalue2\r" },
new object[] { "value1\u007fvalue2" },
new object[] { "value1\r\nvalue2" },
new object[] { "value1\u0019value2" }
};
[Theory, MemberData(nameof(InvalidValues))]
public void Setter_InvalidValue_Throws(string value)
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("value", () => w["custom"] = value);
}
public static object[][] ValidValues = {
new object[] { null },
new object[] { "" },
new object[] { "value1\r\n" },
new object[] { "value1\tvalue2" },
new object[] { "value1\r\n\tvalue2" },
new object[] { "value1\r\n value2" }
};
[Theory, MemberData(nameof(ValidValues))]
public void Setter_ValidValue_Success(string value)
{
WebHeaderCollection w = new WebHeaderCollection();
w["custom"] = value;
}
[Theory]
[InlineData("name", "name")]
[InlineData("name", "NaMe")]
[InlineData("nAmE", "name")]
public void Setter_SameHeaderTwice_Success(string firstName, string secondName)
{
WebHeaderCollection w = new WebHeaderCollection();
w[firstName] = "first";
w[secondName] = "second";
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { firstName }, w.AllKeys);
Assert.Equal("second", w[firstName]);
Assert.Equal("second", w[secondName]);
}
[Theory]
[InlineData("name")]
[InlineData("nAMe")]
public void Remove_HeaderExists_RemovesFromCollection(string name)
{
var headers = new WebHeaderCollection()
{
{ "name", "value" }
};
headers.Remove(name);
Assert.Empty(headers);
headers.Remove(name);
Assert.Empty(headers);
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Remove_NullOrEmptyHeader_ThrowsArgumentNullException(string name)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("name", () => headers.Remove(name));
}
[Theory]
[InlineData(" \r \t \n")]
[InlineData(" name ")]
[MemberData(nameof(InvalidValues))]
public void Remove_InvalidHeader_ThrowsArgumentException(string name)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("name", () => headers.Remove(name));
}
[Fact]
public void Remove_IllegalCharacter_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("name", () => w.Remove("{"));
}
[Fact]
public void Remove_EmptyCollection_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Remove("foo");
Assert.Equal(0, w.Count);
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Theory]
[InlineData("name", "name")]
[InlineData("name", "NaMe")]
public void Remove_SetThenRemove_Success(string setName, string removeName)
{
WebHeaderCollection w = new WebHeaderCollection();
w[setName] = "value";
w.Remove(removeName);
Assert.Equal(0, w.Count);
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Theory]
[InlineData("name", "name")]
[InlineData("name", "NaMe")]
public void Remove_SetTwoThenRemoveOne_Success(string setName, string removeName)
{
WebHeaderCollection w = new WebHeaderCollection();
w[setName] = "value";
w["foo"] = "bar";
w.Remove(removeName);
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { "foo" }, w.AllKeys);
Assert.Equal("bar", w["foo"]);
}
[Fact]
public void Getter_EmptyCollection_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Null(w["name"]);
Assert.Equal(0, w.Count);
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Fact]
public void Getter_NonEmptyCollectionNonExistentHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["name"] = "value";
Assert.Null(w["foo"]);
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { "name" }, w.AllKeys);
Assert.Equal("value", w["name"]);
}
[Fact]
public void Getter_Success()
{
string[] keys = { "Accept", "uPgRaDe", "Custom" };
string[] values = { "text/plain, text/html", " HTTP/2.0 , SHTTP/1.3, , RTA/x11 ", "\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"" };
WebHeaderCollection w = new WebHeaderCollection();
for (int i = 0; i < keys.Length; ++i)
{
string key = keys[i];
string value = values[i];
w[key] = value;
}
for (int i = 0; i < keys.Length; ++i)
{
string key = keys[i];
string expected = values[i].Trim();
Assert.Equal(expected, w[key]);
Assert.Equal(expected, w[key.ToUpperInvariant()]);
Assert.Equal(expected, w[key.ToLowerInvariant()]);
}
}
[Fact]
public void ToString_Empty_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Equal("\r\n", w.ToString());
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void ToString_SingleHeaderWithEmptyValue_Success(string value)
{
WebHeaderCollection w = new WebHeaderCollection();
w["name"] = value;
Assert.Equal("name: \r\n\r\n", w.ToString());
}
[Fact]
public void ToString_NotEmpty_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["Accept"] = "text/plain";
w["Content-Length"] = "123";
Assert.Equal(
"Accept: text/plain\r\nContent-Length: 123\r\n\r\n",
w.ToString());
}
[Fact]
public void IterateCollection_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["Accept"] = "text/plain";
w["Content-Length"] = "123";
string result = "";
foreach (var item in w)
{
result += item;
}
Assert.Equal("AcceptContent-Length", result);
}
[Fact]
public void Enumerator_Success()
{
string item1 = "Accept";
string item2 = "Content-Length";
string item3 = "Name";
WebHeaderCollection w = new WebHeaderCollection();
w[item1] = "text/plain";
w[item2] = "123";
w[item3] = "value";
IEnumerable collection = w;
IEnumerator e = collection.GetEnumerator();
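// Enumerate twice to verify that Reset() returns the enumerator to its initial state.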
for (int i = 0; i < 2; i++)
{
// Not started
Assert.Throws<InvalidOperationException>(() => e.Current);
Assert.True(e.MoveNext());
Assert.Same(item1, e.Current);
Assert.True(e.MoveNext());
Assert.Same(item2, e.Current);
Assert.True(e.MoveNext());
Assert.Same(item3, e.Current);
Assert.False(e.MoveNext());
Assert.False(e.MoveNext());
Assert.False(e.MoveNext());
// Ended
Assert.Throws<InvalidOperationException>(() => e.Current);
e.Reset();
}
}
public static IEnumerable<object[]> SerializeDeserialize_Roundtrip_MemberData()
{
for (int i = 0; i < 10; i++)
{
var wc = new WebHeaderCollection();
for (int j = 0; j < i; j++)
{
wc[$"header{j}"] = $"value{j}";
}
yield return new object[] { wc };
}
}
public static IEnumerable<object[]> Add_Value_TestData()
{
yield return new object[] { null, string.Empty };
yield return new object[] { string.Empty, string.Empty };
yield return new object[] { "VaLue", "VaLue" };
yield return new object[] { " value ", "value" };
// Documentation says this should fail but it does not.
string longString = new string('a', 65536);
yield return new object[] { longString, longString };
}
[Theory]
[MemberData(nameof(Add_Value_TestData))]
public void Add_ValidValue_Success(string value, string expectedValue)
{
var headers = new WebHeaderCollection
{
{ "name", value }
};
Assert.Equal(expectedValue, headers["name"]);
}
[Fact]
public void Add_HeaderAlreadyExists_AppendsValue()
{
var headers = new WebHeaderCollection
{
{ "name", "value1" },
{ "name", null },
{ "name", "value2" },
{ "NAME", "value3" },
{ "name", "" }
};
Assert.Equal("value1,,value2,value3,", headers["name"]);
}
[Fact]
public void Add_NullName_ThrowsArgumentNullException()
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("name", () => headers.Add(null, "value"));
}
[Theory]
[InlineData("")]
[InlineData("(")]
[InlineData("\r \t \n")]
[InlineData(" name ")]
[MemberData(nameof(InvalidValues))]
public void Add_InvalidName_ThrowsArgumentException(string name)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("name", () => headers.Add(name, "value"));
}
[Theory]
[MemberData(nameof(InvalidValues))]
public void Add_InvalidValue_ThrowsArgumentException(string value)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("value", () => headers.Add("name", value));
}
[Fact]
public void Add_ValidHeader_AddsToHeaders()
{
var headers = new WebHeaderCollection()
{
"name:value1",
"name:",
"NaMe:value2",
"name: ",
};
Assert.Equal("value1,,value2,", headers["name"]);
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Add_NullHeader_ThrowsArgumentNullException(string header)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("header", () => headers.Add(header));
}
[Theory]
[InlineData(" \r \t \n", "header")]
[InlineData("nocolon", "header")]
[InlineData(" :value", "name")]
[InlineData("name :value", "name")]
[InlineData("name:va\rlue", "value")]
public void Add_InvalidHeader_ThrowsArgumentException(string header, string paramName)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>(paramName, () => headers.Add(header));
}
private const string HeaderType = "Set-Cookie";
private const string Cookie1 = "locale=en; path=/; expires=Fri, 05 Oct 2018 06:28:57 -0000";
private const string Cookie2 = "uuid=123abc; path=/; expires=Fri, 05 Oct 2018 06:28:57 -0000; secure; HttpOnly";
private const string Cookie3 = "country=US; path=/; expires=Fri, 05 Oct 2018 06:28:57 -0000";
private const string Cookie4 = "m_session=session1; path=/; expires=Sun, 08 Oct 2017 00:28:57 -0000; secure; HttpOnly";
private const string Cookie1NoAttribute = "locale=en";
private const string Cookie2NoAttribute = "uuid=123abc";
private const string Cookie3NoAttribute = "country=US";
private const string Cookie4NoAttribute = "m_session=session1";
private const string CookieInvalid = "helloWorld";
[Fact]
public void GetValues_MultipleSetCookieHeadersWithExpiresAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1);
w.Add(HeaderType, Cookie2);
w.Add(HeaderType, Cookie3);
w.Add(HeaderType, Cookie4);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1, values[0]);
Assert.Equal(Cookie2, values[1]);
Assert.Equal(Cookie3, values[2]);
Assert.Equal(Cookie4, values[3]);
}
[Fact]
public void GetValues_SingleSetCookieHeaderWithMultipleCookiesWithExpiresAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1 + "," + Cookie2 + "," + Cookie3 + "," + Cookie4);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1, values[0]);
Assert.Equal(Cookie2, values[1]);
Assert.Equal(Cookie3, values[2]);
Assert.Equal(Cookie4, values[3]);
}
[Fact]
public void GetValues_MultipleSetCookieHeadersWithNoAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1NoAttribute);
w.Add(HeaderType, Cookie2NoAttribute);
w.Add(HeaderType, Cookie3NoAttribute);
w.Add(HeaderType, Cookie4NoAttribute);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1NoAttribute, values[0]);
Assert.Equal(Cookie2NoAttribute, values[1]);
Assert.Equal(Cookie3NoAttribute, values[2]);
Assert.Equal(Cookie4NoAttribute, values[3]);
}
[Fact]
public void GetValues_SingleSetCookieHeaderWithMultipleCookiesWithNoAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1NoAttribute + "," + Cookie2NoAttribute + "," + Cookie3NoAttribute + "," + Cookie4NoAttribute);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1NoAttribute, values[0]);
Assert.Equal(Cookie2NoAttribute, values[1]);
Assert.Equal(Cookie3NoAttribute, values[2]);
Assert.Equal(Cookie4NoAttribute, values[3]);
}
[Fact]
public void GetValues_InvalidSetCookieHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, CookieInvalid);
string[] values = w.GetValues(HeaderType);
Assert.Equal(0, values.Length);
}
[Fact]
public void GetValues_MultipleValuesHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
string headerType = "Accept";
w.Add(headerType, "text/plain, text/html");
string[] values = w.GetValues(headerType);
Assert.Equal(2, values.Length);
Assert.Equal("text/plain", values[0]);
Assert.Equal("text/html", values[1]);
}
[Fact]
public void HttpRequestHeader_Add_Remove_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HttpRequestHeader.Warning, "Warning1");
Assert.Equal(1, w.Count);
Assert.Equal("Warning1", w[HttpRequestHeader.Warning]);
Assert.Equal("Warning", w.AllKeys[0]);
w.Remove(HttpRequestHeader.Warning);
Assert.Equal(0, w.Count);
}
[Fact]
public void HttpRequestHeader_Get_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
string[] values = w.GetValues(0);
Assert.Equal("value1", values[0]);
Assert.Equal("value2", values[1]);
}
[Fact]
public void HttpRequestHeader_ToByteArray_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
byte[] byteArr = w.ToByteArray();
Assert.NotEmpty(byteArr);
}
[Fact]
public void HttpRequestHeader_GetKey_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
Assert.NotEmpty(w.GetKey(0));
}
[Fact]
public void HttpRequestHeader_GetValues_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
Assert.Equal("value1", w.GetValues("header1")[0]);
}
[Fact]
public void HttpRequestHeader_Clear_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
w.Clear();
Assert.Equal(0, w.Count);
}
[Fact]
public void HttpRequestHeader_IsRestricted_Success()
{
Assert.True(WebHeaderCollection.IsRestricted("Accept"));
Assert.False(WebHeaderCollection.IsRestricted("Age"));
Assert.False(WebHeaderCollection.IsRestricted("Accept", true));
}
[Fact]
public void HttpRequestHeader_AddHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HttpRequestHeader.ContentLength, "10");
w.Add(HttpRequestHeader.ContentType, "text/html");
            Assert.Equal(2, w.Count);
}
[Fact]
public void WebHeaderCollection_Keys_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HttpRequestHeader.ContentLength, "10");
w.Add(HttpRequestHeader.ContentType, "text/html");
Assert.Equal(2, w.Keys.Count);
}
[Fact]
public void HttpRequestHeader_AddHeader_Failure()
{
WebHeaderCollection w = new WebHeaderCollection();
char[] arr = new char[ushort.MaxValue + 1];
string maxStr = new string(arr);
AssertExtensions.Throws<ArgumentException>("value", () => w.Add(HttpRequestHeader.ContentLength,maxStr));
AssertExtensions.Throws<ArgumentException>("value", () => w.Add("ContentLength", maxStr));
}
[Fact]
public void HttpResponseHeader_Set_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Set(HttpResponseHeader.ProxyAuthenticate, "value123");
Assert.Equal("value123", w[HttpResponseHeader.ProxyAuthenticate]);
}
[Fact]
public void HttpRequestHeader_Set_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Set(HttpRequestHeader.Connection, "keep-alive");
Assert.Equal(1, w.Count);
Assert.Equal("keep-alive", w[HttpRequestHeader.Connection]);
Assert.Equal("Connection", w.AllKeys[0]);
}
[Fact]
public void NameValue_Set_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Set("firstName", "first");
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { "firstName" }, w.AllKeys);
Assert.Equal("first", w["firstName"]);
}
[Fact]
public void AddLongString_DoesNotThrow()
{
string longString = new string('a', 65536);
WebHeaderCollection headerCollection = new WebHeaderCollection();
headerCollection.Add("Long-Header", longString);
headerCollection["Long-Header-2"] = longString;
headerCollection.Add(HttpResponseHeader.SetCookie, "someValueToChangeType"); // this will implicitly change _type
headerCollection.Add("Long-Header-3", longString);
headerCollection["Long-Header-4"] = longString;
Assert.Equal(longString, headerCollection["Long-Header"]);
Assert.Equal(longString, headerCollection["Long-Header-2"]);
Assert.Equal(longString, headerCollection["Long-Header-3"]);
Assert.Equal(longString, headerCollection["Long-Header-4"]);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections;
using System.Collections.Generic;
using Xunit;
namespace System.Net.Tests
{
public partial class WebHeaderCollectionTest
{
[Fact]
public void Ctor_Success()
{
new WebHeaderCollection();
}
[Fact]
public void DefaultPropertyValues_ReturnEmptyAfterConstruction_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Equal(0, w.AllKeys.Length);
Assert.Equal(0, w.Count);
Assert.Equal("\r\n", w.ToString());
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Fact]
public void HttpRequestHeader_Add_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Connection] = "keep-alive";
Assert.Equal(1, w.Count);
Assert.Equal("keep-alive", w[HttpRequestHeader.Connection]);
Assert.Equal("Connection", w.AllKeys[0]);
}
[Theory]
[InlineData((HttpRequestHeader)int.MinValue)]
[InlineData((HttpRequestHeader)(-1))]
[InlineData((HttpRequestHeader)int.MaxValue)]
public void HttpRequestHeader_AddInvalid_Throws(HttpRequestHeader header)
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Throws<IndexOutOfRangeException>(() => w[header] = "foo");
}
[Theory]
[InlineData((HttpResponseHeader)int.MinValue)]
[InlineData((HttpResponseHeader)(-1))]
[InlineData((HttpResponseHeader)int.MaxValue)]
public void HttpResponseHeader_AddInvalid_Throws(HttpResponseHeader header)
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Throws<IndexOutOfRangeException>(() => w[header] = "foo");
}
[Fact]
public void CustomHeader_AddQuery_Success()
{
string customHeader = "Custom-Header";
string customValue = "Custom;.-Value";
WebHeaderCollection w = new WebHeaderCollection();
w[customHeader] = customValue;
Assert.Equal(1, w.Count);
Assert.Equal(customValue, w[customHeader]);
Assert.Equal(customHeader, w.AllKeys[0]);
}
[Fact]
public void HttpResponseHeader_AddQuery_CommonHeader_Success()
{
string headerValue = "value123";
WebHeaderCollection w = new WebHeaderCollection();
w[HttpResponseHeader.ProxyAuthenticate] = headerValue;
w[HttpResponseHeader.WwwAuthenticate] = headerValue;
Assert.Equal(headerValue, w[HttpResponseHeader.ProxyAuthenticate]);
Assert.Equal(headerValue, w[HttpResponseHeader.WwwAuthenticate]);
}
[Fact]
public void HttpRequest_AddQuery_CommonHeader_Success()
{
string headerValue = "value123";
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Accept] = headerValue;
Assert.Equal(headerValue, w[HttpRequestHeader.Accept]);
}
[Fact]
public void RequestThenResponseHeaders_Add_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Accept] = "text/json";
Assert.Throws<InvalidOperationException>(() => w[HttpResponseHeader.ContentLength] = "123");
}
[Fact]
public void ResponseThenRequestHeaders_Add_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpResponseHeader.ContentLength] = "123";
Assert.Throws<InvalidOperationException>(() => w[HttpRequestHeader.Accept] = "text/json");
}
[Fact]
public void ResponseHeader_QueryRequest_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpResponseHeader.ContentLength] = "123";
Assert.Throws<InvalidOperationException>(() => w[HttpRequestHeader.Accept]);
}
[Fact]
public void RequestHeader_QueryResponse_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
w[HttpRequestHeader.Accept] = "text/json";
Assert.Throws<InvalidOperationException>(() => w[HttpResponseHeader.ContentLength]);
}
[Fact]
public void Setter_ValidName_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["Accept"] = "text/json";
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Setter_NullOrEmptyName_Throws(string name)
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("name", () => w[name] = "test");
}
public static object[][] InvalidNames = {
new object[] { "(" },
new object[] { "\u1234" },
new object[] { "\u0019" }
};
[Theory, MemberData(nameof(InvalidNames))]
public void Setter_InvalidName_Throws(string name)
{
WebHeaderCollection w = new WebHeaderCollection();
ArgumentException exception = AssertExtensions.Throws<ArgumentException>("name", () => w[name] = "test");
Assert.Contains(name, exception.Message);
}
public static object[][] InvalidValues = {
new object[] { "value1\rvalue2\r" },
new object[] { "value1\nvalue2\r" },
new object[] { "value1\u007fvalue2" },
new object[] { "value1\r\nvalue2" },
new object[] { "value1\u0019value2" }
};
[Theory, MemberData(nameof(InvalidValues))]
public void Setter_InvalidValue_Throws(string value)
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("value", () => w["custom"] = value);
}
public static object[][] ValidValues = {
new object[] { null },
new object[] { "" },
new object[] { "value1\r\n" },
new object[] { "value1\tvalue2" },
new object[] { "value1\r\n\tvalue2" },
new object[] { "value1\r\n value2" }
};
[Theory, MemberData(nameof(ValidValues))]
public void Setter_ValidValue_Success(string value)
{
WebHeaderCollection w = new WebHeaderCollection();
w["custom"] = value;
}
[Theory]
[InlineData("name", "name")]
[InlineData("name", "NaMe")]
[InlineData("nAmE", "name")]
public void Setter_SameHeaderTwice_Success(string firstName, string secondName)
{
WebHeaderCollection w = new WebHeaderCollection();
w[firstName] = "first";
w[secondName] = "second";
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { firstName }, w.AllKeys);
Assert.Equal("second", w[firstName]);
Assert.Equal("second", w[secondName]);
}
[Theory]
[InlineData("name")]
[InlineData("nAMe")]
public void Remove_HeaderExists_RemovesFromCollection(string name)
{
var headers = new WebHeaderCollection()
{
{ "name", "value" }
};
headers.Remove(name);
Assert.Empty(headers);
headers.Remove(name);
Assert.Empty(headers);
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Remove_NullOrEmptyHeader_ThrowsArgumentNullException(string name)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("name", () => headers.Remove(name));
}
[Theory]
[InlineData(" \r \t \n")]
[InlineData(" name ")]
[MemberData(nameof(InvalidValues))]
public void Remove_InvalidHeader_ThrowsArgumentException(string name)
{
var headers = new WebHeaderCollection();
ArgumentException exception = AssertExtensions.Throws<ArgumentException>("name", () => headers.Remove(name));
Assert.Contains(name, exception.Message);
}
[Fact]
public void Remove_IllegalCharacter_Throws()
{
WebHeaderCollection w = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("name", () => w.Remove("{"));
}
[Fact]
public void Remove_EmptyCollection_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Remove("foo");
Assert.Equal(0, w.Count);
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Theory]
[InlineData("name", "name")]
[InlineData("name", "NaMe")]
public void Remove_SetThenRemove_Success(string setName, string removeName)
{
WebHeaderCollection w = new WebHeaderCollection();
w[setName] = "value";
w.Remove(removeName);
Assert.Equal(0, w.Count);
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Theory]
[InlineData("name", "name")]
[InlineData("name", "NaMe")]
public void Remove_SetTwoThenRemoveOne_Success(string setName, string removeName)
{
WebHeaderCollection w = new WebHeaderCollection();
w[setName] = "value";
w["foo"] = "bar";
w.Remove(removeName);
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { "foo" }, w.AllKeys);
Assert.Equal("bar", w["foo"]);
}
[Fact]
public void Getter_EmptyCollection_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Null(w["name"]);
Assert.Equal(0, w.Count);
Assert.Empty(w);
Assert.Empty(w.AllKeys);
}
[Fact]
public void Getter_NonEmptyCollectionNonExistentHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["name"] = "value";
Assert.Null(w["foo"]);
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { "name" }, w.AllKeys);
Assert.Equal("value", w["name"]);
}
[Fact]
public void Getter_Success()
{
string[] keys = { "Accept", "uPgRaDe", "Custom" };
string[] values = { "text/plain, text/html", " HTTP/2.0 , SHTTP/1.3, , RTA/x11 ", "\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"" };
WebHeaderCollection w = new WebHeaderCollection();
for (int i = 0; i < keys.Length; ++i)
{
string key = keys[i];
string value = values[i];
w[key] = value;
}
for (int i = 0; i < keys.Length; ++i)
{
string key = keys[i];
string expected = values[i].Trim();
Assert.Equal(expected, w[key]);
Assert.Equal(expected, w[key.ToUpperInvariant()]);
Assert.Equal(expected, w[key.ToLowerInvariant()]);
}
}
[Fact]
public void ToString_Empty_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
Assert.Equal("\r\n", w.ToString());
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void ToString_SingleHeaderWithEmptyValue_Success(string value)
{
WebHeaderCollection w = new WebHeaderCollection();
w["name"] = value;
Assert.Equal("name: \r\n\r\n", w.ToString());
}
[Fact]
public void ToString_NotEmpty_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["Accept"] = "text/plain";
w["Content-Length"] = "123";
Assert.Equal(
"Accept: text/plain\r\nContent-Length: 123\r\n\r\n",
w.ToString());
}
[Fact]
public void IterateCollection_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w["Accept"] = "text/plain";
w["Content-Length"] = "123";
string result = "";
foreach (var item in w)
{
result += item;
}
Assert.Equal("AcceptContent-Length", result);
}
[Fact]
public void Enumerator_Success()
{
string item1 = "Accept";
string item2 = "Content-Length";
string item3 = "Name";
WebHeaderCollection w = new WebHeaderCollection();
w[item1] = "text/plain";
w[item2] = "123";
w[item3] = "value";
IEnumerable collection = w;
IEnumerator e = collection.GetEnumerator();
for (int i = 0; i < 2; i++)
{
// Not started
Assert.Throws<InvalidOperationException>(() => e.Current);
Assert.True(e.MoveNext());
Assert.Same(item1, e.Current);
Assert.True(e.MoveNext());
Assert.Same(item2, e.Current);
Assert.True(e.MoveNext());
Assert.Same(item3, e.Current);
Assert.False(e.MoveNext());
Assert.False(e.MoveNext());
Assert.False(e.MoveNext());
// Ended
Assert.Throws<InvalidOperationException>(() => e.Current);
e.Reset();
}
}
public static IEnumerable<object[]> SerializeDeserialize_Roundtrip_MemberData()
{
for (int i = 0; i < 10; i++)
{
var wc = new WebHeaderCollection();
for (int j = 0; j < i; j++)
{
wc[$"header{j}"] = $"value{j}";
}
yield return new object[] { wc };
}
}
public static IEnumerable<object[]> Add_Value_TestData()
{
yield return new object[] { null, string.Empty };
yield return new object[] { string.Empty, string.Empty };
yield return new object[] { "VaLue", "VaLue" };
yield return new object[] { " value ", "value" };
// Documentation says this should fail but it does not.
string longString = new string('a', 65536);
yield return new object[] { longString, longString };
}
[Theory]
[MemberData(nameof(Add_Value_TestData))]
public void Add_ValidValue_Success(string value, string expectedValue)
{
var headers = new WebHeaderCollection
{
{ "name", value }
};
Assert.Equal(expectedValue, headers["name"]);
}
[Fact]
public void Add_HeaderAlreadyExists_AppendsValue()
{
var headers = new WebHeaderCollection
{
{ "name", "value1" },
{ "name", null },
{ "name", "value2" },
{ "NAME", "value3" },
{ "name", "" }
};
Assert.Equal("value1,,value2,value3,", headers["name"]);
}
[Fact]
public void Add_NullName_ThrowsArgumentNullException()
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("name", () => headers.Add(null, "value"));
}
[Theory]
[InlineData("")]
[InlineData("(")]
[InlineData("\r \t \n")]
[InlineData(" name ")]
[MemberData(nameof(InvalidValues))]
public void Add_InvalidName_ThrowsArgumentException(string name)
{
var headers = new WebHeaderCollection();
ArgumentException exception = AssertExtensions.Throws<ArgumentException>("name", () => headers.Add(name, "value"));
if (!string.IsNullOrEmpty(name))
{
Assert.Contains(name, exception.Message);
}
}
[Theory]
[MemberData(nameof(InvalidValues))]
public void Add_InvalidValue_ThrowsArgumentException(string value)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>("value", () => headers.Add("name", value));
}
[Fact]
public void Add_ValidHeader_AddsToHeaders()
{
var headers = new WebHeaderCollection()
{
"name:value1",
"name:",
"NaMe:value2",
"name: ",
};
Assert.Equal("value1,,value2,", headers["name"]);
}
[Theory]
[InlineData(null)]
[InlineData("")]
public void Add_NullHeader_ThrowsArgumentNullException(string header)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentNullException>("header", () => headers.Add(header));
}
[Theory]
[InlineData(" \r \t \n", "header")]
[InlineData("nocolon", "header")]
[InlineData(" :value", "name")]
[InlineData("name :value", "name")]
[InlineData("name:va\rlue", "value")]
public void Add_InvalidHeader_ThrowsArgumentException(string header, string paramName)
{
var headers = new WebHeaderCollection();
AssertExtensions.Throws<ArgumentException>(paramName, () => headers.Add(header));
}
private const string HeaderType = "Set-Cookie";
private const string Cookie1 = "locale=en; path=/; expires=Fri, 05 Oct 2018 06:28:57 -0000";
private const string Cookie2 = "uuid=123abc; path=/; expires=Fri, 05 Oct 2018 06:28:57 -0000; secure; HttpOnly";
private const string Cookie3 = "country=US; path=/; expires=Fri, 05 Oct 2018 06:28:57 -0000";
private const string Cookie4 = "m_session=session1; path=/; expires=Sun, 08 Oct 2017 00:28:57 -0000; secure; HttpOnly";
private const string Cookie1NoAttribute = "locale=en";
private const string Cookie2NoAttribute = "uuid=123abc";
private const string Cookie3NoAttribute = "country=US";
private const string Cookie4NoAttribute = "m_session=session1";
private const string CookieInvalid = "helloWorld";
[Fact]
public void GetValues_MultipleSetCookieHeadersWithExpiresAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1);
w.Add(HeaderType, Cookie2);
w.Add(HeaderType, Cookie3);
w.Add(HeaderType, Cookie4);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1, values[0]);
Assert.Equal(Cookie2, values[1]);
Assert.Equal(Cookie3, values[2]);
Assert.Equal(Cookie4, values[3]);
}
[Fact]
public void GetValues_SingleSetCookieHeaderWithMultipleCookiesWithExpiresAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1 + "," + Cookie2 + "," + Cookie3 + "," + Cookie4);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1, values[0]);
Assert.Equal(Cookie2, values[1]);
Assert.Equal(Cookie3, values[2]);
Assert.Equal(Cookie4, values[3]);
}
[Fact]
public void GetValues_MultipleSetCookieHeadersWithNoAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1NoAttribute);
w.Add(HeaderType, Cookie2NoAttribute);
w.Add(HeaderType, Cookie3NoAttribute);
w.Add(HeaderType, Cookie4NoAttribute);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1NoAttribute, values[0]);
Assert.Equal(Cookie2NoAttribute, values[1]);
Assert.Equal(Cookie3NoAttribute, values[2]);
Assert.Equal(Cookie4NoAttribute, values[3]);
}
[Fact]
public void GetValues_SingleSetCookieHeaderWithMultipleCookiesWithNoAttribute_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, Cookie1NoAttribute + "," + Cookie2NoAttribute + "," + Cookie3NoAttribute + "," + Cookie4NoAttribute);
string[] values = w.GetValues(HeaderType);
Assert.Equal(4, values.Length);
Assert.Equal(Cookie1NoAttribute, values[0]);
Assert.Equal(Cookie2NoAttribute, values[1]);
Assert.Equal(Cookie3NoAttribute, values[2]);
Assert.Equal(Cookie4NoAttribute, values[3]);
}
[Fact]
public void GetValues_InvalidSetCookieHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HeaderType, CookieInvalid);
string[] values = w.GetValues(HeaderType);
Assert.Equal(0, values.Length);
}
[Fact]
public void GetValues_MultipleValuesHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
string headerType = "Accept";
w.Add(headerType, "text/plain, text/html");
string[] values = w.GetValues(headerType);
Assert.Equal(2, values.Length);
Assert.Equal("text/plain", values[0]);
Assert.Equal("text/html", values[1]);
}
[Fact]
        public void HttpRequestHeader_Add_Remove_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HttpRequestHeader.Warning, "Warning1");
Assert.Equal(1, w.Count);
Assert.Equal("Warning1", w[HttpRequestHeader.Warning]);
Assert.Equal("Warning", w.AllKeys[0]);
w.Remove(HttpRequestHeader.Warning);
Assert.Equal(0, w.Count);
}
[Fact]
public void HttpRequestHeader_Get_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
string[] values = w.GetValues(0);
Assert.Equal("value1", values[0]);
Assert.Equal("value2", values[1]);
}
[Fact]
public void HttpRequestHeader_ToByteArray_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
byte[] byteArr = w.ToByteArray();
Assert.NotEmpty(byteArr);
}
[Fact]
public void HttpRequestHeader_GetKey_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
Assert.NotEmpty(w.GetKey(0));
}
[Fact]
public void HttpRequestHeader_GetValues_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
Assert.Equal("value1", w.GetValues("header1")[0]);
}
[Fact]
public void HttpRequestHeader_Clear_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add("header1", "value1");
w.Add("header1", "value2");
w.Clear();
Assert.Equal(0, w.Count);
}
[Fact]
public void HttpRequestHeader_IsRestricted_Success()
{
Assert.True(WebHeaderCollection.IsRestricted("Accept"));
Assert.False(WebHeaderCollection.IsRestricted("Age"));
Assert.False(WebHeaderCollection.IsRestricted("Accept", true));
}
[Fact]
public void HttpRequestHeader_AddHeader_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HttpRequestHeader.ContentLength, "10");
w.Add(HttpRequestHeader.ContentType, "text/html");
            Assert.Equal(2, w.Count);
}
[Fact]
public void WebHeaderCollection_Keys_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Add(HttpRequestHeader.ContentLength, "10");
w.Add(HttpRequestHeader.ContentType, "text/html");
Assert.Equal(2, w.Keys.Count);
}
[Fact]
public void HttpRequestHeader_AddHeader_Failure()
{
WebHeaderCollection w = new WebHeaderCollection();
char[] arr = new char[ushort.MaxValue + 1];
string maxStr = new string(arr);
AssertExtensions.Throws<ArgumentException>("value", () => w.Add(HttpRequestHeader.ContentLength,maxStr));
AssertExtensions.Throws<ArgumentException>("value", () => w.Add("ContentLength", maxStr));
}
[Fact]
public void HttpResponseHeader_Set_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Set(HttpResponseHeader.ProxyAuthenticate, "value123");
Assert.Equal("value123", w[HttpResponseHeader.ProxyAuthenticate]);
}
[Fact]
public void HttpRequestHeader_Set_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Set(HttpRequestHeader.Connection, "keep-alive");
Assert.Equal(1, w.Count);
Assert.Equal("keep-alive", w[HttpRequestHeader.Connection]);
Assert.Equal("Connection", w.AllKeys[0]);
}
[Fact]
public void NameValue_Set_Success()
{
WebHeaderCollection w = new WebHeaderCollection();
w.Set("firstName", "first");
Assert.Equal(1, w.Count);
Assert.NotEmpty(w);
Assert.NotEmpty(w.AllKeys);
Assert.Equal(new[] { "firstName" }, w.AllKeys);
Assert.Equal("first", w["firstName"]);
}
[Fact]
public void AddLongString_DoesNotThrow()
{
string longString = new string('a', 65536);
WebHeaderCollection headerCollection = new WebHeaderCollection();
headerCollection.Add("Long-Header", longString);
headerCollection["Long-Header-2"] = longString;
headerCollection.Add(HttpResponseHeader.SetCookie, "someValueToChangeType"); // this will implicitly change _type
headerCollection.Add("Long-Header-3", longString);
headerCollection["Long-Header-4"] = longString;
Assert.Equal(longString, headerCollection["Long-Header"]);
Assert.Equal(longString, headerCollection["Long-Header-2"]);
Assert.Equal(longString, headerCollection["Long-Header-3"]);
Assert.Equal(longString, headerCollection["Long-Header-4"]);
}
}
}
| 1 |
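The updated copy of WebHeaderCollectionTest.cs above adds Assert.Contains(name, exception.Message) checks to the invalid-name tests, matching the PR described in the metadata rows that follow ("use the name of the header when throwing exception for invalid chars", #65136): rejecting a header with bad characters should name the offending header in the exception. The sketch below is illustrative only -- the HeaderNameValidator.EnsureValidName name, the simplified character rules, and the message wording are assumptions, not the actual System.Net validation code -- but it shows the kind of helper those assertions imply.

using System;

// Hypothetical sketch: surface the offending header name in the validation
// error, as the updated tests assert. Not the real WebHeaderCollection code.
internal static class HeaderNameValidator
{
    // Simplified rejection rule: control chars, space, DEL/non-ASCII, and a
    // handful of separator characters. The real header token rules are stricter.
    private static bool IsInvalidNameChar(char c) =>
        c <= 0x20 || c >= 0x7F || "()<>@,;:\\\"/[]?={}".IndexOf(c) >= 0;

    public static void EnsureValidName(string name)
    {
        if (string.IsNullOrEmpty(name))
            throw new ArgumentNullException(nameof(name));

        foreach (char c in name)
        {
            if (IsInvalidNameChar(c))
            {
                // Including the rejected name lets callers (and tests using
                // Assert.Contains(name, exception.Message)) see which header failed.
                throw new ArgumentException(
                    $"Specified value has invalid HTTP header name characters: '{name}'.",
                    nameof(name));
            }
        }
    }
}

Under these assumed rules, calling HeaderNameValidator.EnsureValidName("na me") or EnsureValidName("(") throws an ArgumentException whose message contains the rejected name, which is what the Assert.Contains assertions in the tests above check for.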
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/OrNot.Vector64.UInt16.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void OrNot_Vector64_UInt16()
{
var test = new SimpleBinaryOpTest__OrNot_Vector64_UInt16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__OrNot_Vector64_UInt16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<UInt16> _fld1;
public Vector64<UInt16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__OrNot_Vector64_UInt16 testClass)
{
var result = AdvSimd.OrNot(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__OrNot_Vector64_UInt16 testClass)
{
fixed (Vector64<UInt16>* pFld1 = &_fld1)
fixed (Vector64<UInt16>* pFld2 = &_fld2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pFld1)),
AdvSimd.LoadVector64((UInt16*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16);
private static UInt16[] _data1 = new UInt16[Op1ElementCount];
private static UInt16[] _data2 = new UInt16[Op2ElementCount];
private static Vector64<UInt16> _clsVar1;
private static Vector64<UInt16> _clsVar2;
private Vector64<UInt16> _fld1;
private Vector64<UInt16> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__OrNot_Vector64_UInt16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
}
public SimpleBinaryOpTest__OrNot_Vector64_UInt16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
_dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.OrNot(
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.OrNot), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.OrNot), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.OrNot(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<UInt16>* pClsVar1 = &_clsVar1)
fixed (Vector64<UInt16>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pClsVar1)),
AdvSimd.LoadVector64((UInt16*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr);
var result = AdvSimd.OrNot(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr));
var result = AdvSimd.OrNot(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__OrNot_Vector64_UInt16();
var result = AdvSimd.OrNot(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__OrNot_Vector64_UInt16();
fixed (Vector64<UInt16>* pFld1 = &test._fld1)
fixed (Vector64<UInt16>* pFld2 = &test._fld2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pFld1)),
AdvSimd.LoadVector64((UInt16*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.OrNot(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<UInt16>* pFld1 = &_fld1)
fixed (Vector64<UInt16>* pFld2 = &_fld2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pFld1)),
AdvSimd.LoadVector64((UInt16*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.OrNot(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(&test._fld1)),
AdvSimd.LoadVector64((UInt16*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<UInt16> op1, Vector64<UInt16> op2, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray1 = new UInt16[Op1ElementCount];
UInt16[] inArray2 = new UInt16[Op2ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray1 = new UInt16[Op1ElementCount];
UInt16[] inArray2 = new UInt16[Op2ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(UInt16[] left, UInt16[] right, UInt16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.OrNot(left[i], right[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.OrNot)}<UInt16>(Vector64<UInt16>, Vector64<UInt16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void OrNot_Vector64_UInt16()
{
var test = new SimpleBinaryOpTest__OrNot_Vector64_UInt16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__OrNot_Vector64_UInt16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt16[] inArray1, UInt16[] inArray2, UInt16[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<UInt16> _fld1;
public Vector64<UInt16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__OrNot_Vector64_UInt16 testClass)
{
var result = AdvSimd.OrNot(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__OrNot_Vector64_UInt16 testClass)
{
fixed (Vector64<UInt16>* pFld1 = &_fld1)
fixed (Vector64<UInt16>* pFld2 = &_fld2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pFld1)),
AdvSimd.LoadVector64((UInt16*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<UInt16>>() / sizeof(UInt16);
private static UInt16[] _data1 = new UInt16[Op1ElementCount];
private static UInt16[] _data2 = new UInt16[Op2ElementCount];
private static Vector64<UInt16> _clsVar1;
private static Vector64<UInt16> _clsVar2;
private Vector64<UInt16> _fld1;
private Vector64<UInt16> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__OrNot_Vector64_UInt16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _clsVar2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
}
public SimpleBinaryOpTest__OrNot_Vector64_UInt16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld1), ref Unsafe.As<UInt16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt16>, byte>(ref _fld2), ref Unsafe.As<UInt16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt16(); }
_dataTable = new DataTable(_data1, _data2, new UInt16[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.OrNot(
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.OrNot), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.OrNot), new Type[] { typeof(Vector64<UInt16>), typeof(Vector64<UInt16>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<UInt16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.OrNot(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<UInt16>* pClsVar1 = &_clsVar1)
fixed (Vector64<UInt16>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pClsVar1)),
AdvSimd.LoadVector64((UInt16*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<UInt16>>(_dataTable.inArray2Ptr);
var result = AdvSimd.OrNot(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector64((UInt16*)(_dataTable.inArray2Ptr));
var result = AdvSimd.OrNot(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__OrNot_Vector64_UInt16();
var result = AdvSimd.OrNot(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__OrNot_Vector64_UInt16();
fixed (Vector64<UInt16>* pFld1 = &test._fld1)
fixed (Vector64<UInt16>* pFld2 = &test._fld2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pFld1)),
AdvSimd.LoadVector64((UInt16*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.OrNot(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<UInt16>* pFld1 = &_fld1)
fixed (Vector64<UInt16>* pFld2 = &_fld2)
{
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(pFld1)),
AdvSimd.LoadVector64((UInt16*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.OrNot(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.OrNot(
AdvSimd.LoadVector64((UInt16*)(&test._fld1)),
AdvSimd.LoadVector64((UInt16*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<UInt16> op1, Vector64<UInt16> op2, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray1 = new UInt16[Op1ElementCount];
UInt16[] inArray2 = new UInt16[Op2ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray1 = new UInt16[Op1ElementCount];
UInt16[] inArray2 = new UInt16[Op2ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<UInt16>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(UInt16[] left, UInt16[] right, UInt16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
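            // Note (added): AdvSimd.OrNot lowers to the ARM64 ORN instruction, so each element of the
            // result is expected to be left[i] | ~right[i]; Helpers.OrNot supplies that scalar reference.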
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.OrNot(left[i], right[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.OrNot)}<UInt16>(Vector64<UInt16>, Vector64<UInt16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Collections.Specialized/tests/NameObjectCollectionBase/NameObjectCollectionBase.SyncRootTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Collections.Specialized.Tests
{
public class NameObjectCollectionBaseSyncRootTests
{
[Fact]
public void SyncRoot()
{
ICollection nameObjectCollection1 = new MyNameObjectCollection();
ICollection nameObjectCollection2 = new MyNameObjectCollection();
Assert.False(nameObjectCollection1.IsSynchronized);
Assert.Same(nameObjectCollection1.SyncRoot, nameObjectCollection1.SyncRoot);
Assert.NotSame(nameObjectCollection1.SyncRoot, nameObjectCollection2.SyncRoot);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Collections.Specialized.Tests
{
public class NameObjectCollectionBaseSyncRootTests
{
[Fact]
public void SyncRoot()
{
ICollection nameObjectCollection1 = new MyNameObjectCollection();
ICollection nameObjectCollection2 = new MyNameObjectCollection();
Assert.False(nameObjectCollection1.IsSynchronized);
Assert.Same(nameObjectCollection1.SyncRoot, nameObjectCollection1.SyncRoot);
Assert.NotSame(nameObjectCollection1.SyncRoot, nameObjectCollection2.SyncRoot);
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/coreclr/tools/aot/ILCompiler.MetadataTransform/Internal/Metadata/NativeFormat/Writer/ConstantValues.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.IO;
using System.Text;
using System.Diagnostics;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Reflection;
using Internal.LowLevelLinq;
namespace Internal.Metadata.NativeFormat.Writer
{
public partial class ConstantBooleanValue
{
public override string ToString()
{
//return String.Format("ConstantBooleanValue : {0}", this.Value);
return String.Format("(Boolean){0}", this.Value);
}
}
public partial class ConstantBooleanArray
{
public override string ToString()
{
//return "ConstantBooleanArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Boolean[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantCharValue
{
public override string ToString()
{
//return String.Format("ConstantCharValue : {0}", this.Value);
return String.Format("'{0}'", this.Value);
}
}
public partial class ConstantCharArray
{
public override string ToString()
{
//return "ConstantCharArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Char[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantStringValue
{
public override string ToString()
{
//return String.Format("ConstantStringValue : {0}", this.Value);
if (this.Value == null) return "null";
else return String.Format("\"{0}\"", this.Value);
}
}
public partial class ConstantStringArray
{
public override string ToString()
{
//return "ConstantStringArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(String[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantByteValue
{
public override string ToString()
{
//return String.Format("ConstantByteValue : {0}", this.Value);
return String.Format("(Byte){0}", this.Value);
}
}
public partial class ConstantByteArray
{
public override string ToString()
{
//return "ConstantByteArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Byte[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantSByteValue
{
public override string ToString()
{
//return String.Format("ConstantSByteValue : {0}", this.Value);
return String.Format("(SByte){0}", this.Value);
}
}
public partial class ConstantSByteArray
{
public override string ToString()
{
//return "ConstantSByteArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(SByte[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantInt16Value
{
public override string ToString()
{
//return String.Format("ConstantInt16Value : {0}", this.Value);
return String.Format("(Int16){0}", this.Value);
}
}
public partial class ConstantInt16Array
{
public override string ToString()
{
//return "ConstantInt16Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Int16[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantUInt16Value
{
public override string ToString()
{
//return String.Format("ConstantUInt16Value : {0}", this.Value);
return String.Format("(UInt16){0}", this.Value);
}
}
public partial class ConstantUInt16Array
{
public override string ToString()
{
//return "ConstantUInt16Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(UInt16[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantInt32Value
{
public override string ToString()
{
//return String.Format("ConstantInt32Value : {0}", this.Value);
return String.Format("(Int32){0}", this.Value);
}
}
public partial class ConstantInt32Array
{
public override string ToString()
{
//return "ConstantInt32Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Int32[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantUInt32Value
{
public override string ToString()
{
//return String.Format("ConstantUInt32Value : {0}", this.Value);
return String.Format("(UInt32){0}", this.Value);
}
}
public partial class ConstantUInt32Array
{
public override string ToString()
{
//return "ConstantUInt32Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(UInt32[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantInt64Value
{
public override string ToString()
{
//return String.Format("ConstantInt64Value : {0}", this.Value);
return String.Format("(Int64){0}", this.Value);
}
}
public partial class ConstantInt64Array
{
public override string ToString()
{
//return "ConstantInt64Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Int64[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantUInt64Value
{
public override string ToString()
{
//return String.Format("ConstantUInt64Value : {0}", this.Value);
return String.Format("(UInt64){0}", this.Value);
}
}
public partial class ConstantUInt64Array
{
public override string ToString()
{
//return "ConstantUInt64Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(UInt64[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantSingleValue
{
public override string ToString()
{
//return String.Format("ConstantSingleValue : {0}", this.Value);
return String.Format("(Single){0}", this.Value);
}
}
public partial class ConstantSingleArray
{
public override string ToString()
{
//return "ConstantSingleArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Single[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantDoubleValue
{
public override string ToString()
{
//return String.Format("ConstantDoubleValue : {0}", this.Value);
return String.Format("(Double){0}", this.Value);
}
}
public partial class ConstantDoubleArray
{
public override string ToString()
{
//return "ConstantDoubleArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Double[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantReferenceValue
{
public override string ToString()
{
return "null";
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.IO;
using System.Text;
using System.Diagnostics;
using System.Threading.Tasks;
using System.Collections.Generic;
using System.Reflection;
using Internal.LowLevelLinq;
namespace Internal.Metadata.NativeFormat.Writer
{
public partial class ConstantBooleanValue
{
public override string ToString()
{
//return String.Format("ConstantBooleanValue : {0}", this.Value);
return String.Format("(Boolean){0}", this.Value);
}
}
public partial class ConstantBooleanArray
{
public override string ToString()
{
//return "ConstantBooleanArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Boolean[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantCharValue
{
public override string ToString()
{
//return String.Format("ConstantCharValue : {0}", this.Value);
return String.Format("'{0}'", this.Value);
}
}
public partial class ConstantCharArray
{
public override string ToString()
{
//return "ConstantCharArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Char[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantStringValue
{
public override string ToString()
{
//return String.Format("ConstantStringValue : {0}", this.Value);
if (this.Value == null) return "null";
else return String.Format("\"{0}\"", this.Value);
}
}
public partial class ConstantStringArray
{
public override string ToString()
{
//return "ConstantStringArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(String[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantByteValue
{
public override string ToString()
{
//return String.Format("ConstantByteValue : {0}", this.Value);
return String.Format("(Byte){0}", this.Value);
}
}
public partial class ConstantByteArray
{
public override string ToString()
{
//return "ConstantByteArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Byte[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantSByteValue
{
public override string ToString()
{
//return String.Format("ConstantSByteValue : {0}", this.Value);
return String.Format("(SByte){0}", this.Value);
}
}
public partial class ConstantSByteArray
{
public override string ToString()
{
//return "ConstantSByteArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(SByte[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantInt16Value
{
public override string ToString()
{
//return String.Format("ConstantInt16Value : {0}", this.Value);
return String.Format("(Int16){0}", this.Value);
}
}
public partial class ConstantInt16Array
{
public override string ToString()
{
//return "ConstantInt16Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Int16[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantUInt16Value
{
public override string ToString()
{
//return String.Format("ConstantUInt16Value : {0}", this.Value);
return String.Format("(UInt16){0}", this.Value);
}
}
public partial class ConstantUInt16Array
{
public override string ToString()
{
//return "ConstantUInt16Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(UInt16[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantInt32Value
{
public override string ToString()
{
//return String.Format("ConstantInt32Value : {0}", this.Value);
return String.Format("(Int32){0}", this.Value);
}
}
public partial class ConstantInt32Array
{
public override string ToString()
{
//return "ConstantInt32Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Int32[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantUInt32Value
{
public override string ToString()
{
//return String.Format("ConstantUInt32Value : {0}", this.Value);
return String.Format("(UInt32){0}", this.Value);
}
}
public partial class ConstantUInt32Array
{
public override string ToString()
{
//return "ConstantUInt32Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(UInt32[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantInt64Value
{
public override string ToString()
{
//return String.Format("ConstantInt64Value : {0}", this.Value);
return String.Format("(Int64){0}", this.Value);
}
}
public partial class ConstantInt64Array
{
public override string ToString()
{
//return "ConstantInt64Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Int64[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantUInt64Value
{
public override string ToString()
{
//return String.Format("ConstantUInt64Value : {0}", this.Value);
return String.Format("(UInt64){0}", this.Value);
}
}
public partial class ConstantUInt64Array
{
public override string ToString()
{
//return "ConstantUInt64Array : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(UInt64[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantSingleValue
{
public override string ToString()
{
//return String.Format("ConstantSingleValue : {0}", this.Value);
return String.Format("(Single){0}", this.Value);
}
}
public partial class ConstantSingleArray
{
public override string ToString()
{
//return "ConstantSingleArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Single[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantDoubleValue
{
public override string ToString()
{
//return String.Format("ConstantDoubleValue : {0}", this.Value);
return String.Format("(Double){0}", this.Value);
}
}
public partial class ConstantDoubleArray
{
public override string ToString()
{
//return "ConstantDoubleArray : {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
return "(Double[]) {" + String.Join(", ", this.Value.Select(v => v.ToString())) + "}";
}
}
public partial class ConstantReferenceValue
{
public override string ToString()
{
return "null";
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/Microsoft.CSharp/src/Microsoft/CSharp/RuntimeBinder/ComInterop/ComObject.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Dynamic;
using System.Linq.Expressions;
using System.Reflection;
using System.Runtime.InteropServices;
namespace Microsoft.CSharp.RuntimeBinder.ComInterop
{
/// <summary>
/// The ComObject class wraps a runtime-callable-wrapper and enables it to be used with the Dynamic Language Runtime and the C# dynamic keyword.
/// </summary>
internal class ComObject : IDynamicMetaObjectProvider
{
internal ComObject(object rcw)
{
Debug.Assert(ComBinder.IsComObject(rcw));
RuntimeCallableWrapper = rcw;
}
internal object RuntimeCallableWrapper { get; }
private static readonly object s_comObjectInfoKey = new object();
/// <summary>
/// Gets a <see cref="ComObject"/> that wraps the runtime-callable-wrapper, or creates one if none currently exists.
/// </summary>
/// <returns></returns>
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
public static ComObject ObjectToComObject(object rcw)
{
Debug.Assert(ComBinder.IsComObject(rcw));
object data = Marshal.GetComObjectData(rcw, s_comObjectInfoKey);
if (data != null)
{
return (ComObject)data;
}
lock (s_comObjectInfoKey)
{
data = Marshal.GetComObjectData(rcw, s_comObjectInfoKey);
if (data != null)
{
return (ComObject)data;
}
ComObject comObjectInfo = CreateComObject(rcw);
if (!Marshal.SetComObjectData(rcw, s_comObjectInfoKey, comObjectInfo))
{
throw Error.SetComObjectDataFailed();
}
return comObjectInfo;
}
}
// Expression that unwraps ComObject
internal static MemberExpression RcwFromComObject(Expression comObject)
{
Debug.Assert(comObject != null && (typeof(ComObject).IsAssignableFrom(comObject.Type) || comObject.Type == typeof(object)), "must be ComObject");
return Expression.Property(
Helpers.Convert(comObject, typeof(ComObject)),
typeof(ComObject).GetProperty(nameof(RuntimeCallableWrapper), BindingFlags.NonPublic | BindingFlags.Instance)
);
}
// Expression that finds or creates a ComObject that corresponds to given Rcw
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
internal static MethodCallExpression RcwToComObject(Expression rcw)
{
return Expression.Call(
typeof(ComObject).GetMethod(nameof(ObjectToComObject)),
Helpers.Convert(rcw, typeof(object))
);
}
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
private static ComObject CreateComObject(object rcw)
{
if (rcw is IDispatch dispatchObject)
{
// We can do method invocations on IDispatch objects
return new IDispatchComObject(dispatchObject);
}
// There is not much we can do in this case
return new ComObject(rcw);
}
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
internal virtual IList<string> GetMemberNames(bool dataOnly)
{
return Array.Empty<string>();
}
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
internal virtual IList<KeyValuePair<string, object>> GetMembers(IEnumerable<string> names)
{
return Array.Empty<KeyValuePair<string, object>>();
}
DynamicMetaObject IDynamicMetaObjectProvider.GetMetaObject(Expression parameter)
{
return new ComFallbackMetaObject(parameter, BindingRestrictions.Empty, this);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Dynamic;
using System.Linq.Expressions;
using System.Reflection;
using System.Runtime.InteropServices;
namespace Microsoft.CSharp.RuntimeBinder.ComInterop
{
/// <summary>
/// The ComObject class wraps a runtime-callable-wrapper and enables it to be used with the Dynamic Language Runtime and the C# dynamic keyword.
/// </summary>
internal class ComObject : IDynamicMetaObjectProvider
{
internal ComObject(object rcw)
{
Debug.Assert(ComBinder.IsComObject(rcw));
RuntimeCallableWrapper = rcw;
}
internal object RuntimeCallableWrapper { get; }
private static readonly object s_comObjectInfoKey = new object();
/// <summary>
/// Gets a <see cref="ComObject"/> that wraps the runtime-callable-wrapper, or creates one if none currently exists.
/// </summary>
/// <returns></returns>
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
public static ComObject ObjectToComObject(object rcw)
{
Debug.Assert(ComBinder.IsComObject(rcw));
object data = Marshal.GetComObjectData(rcw, s_comObjectInfoKey);
if (data != null)
{
return (ComObject)data;
}
lock (s_comObjectInfoKey)
{
data = Marshal.GetComObjectData(rcw, s_comObjectInfoKey);
if (data != null)
{
return (ComObject)data;
}
ComObject comObjectInfo = CreateComObject(rcw);
if (!Marshal.SetComObjectData(rcw, s_comObjectInfoKey, comObjectInfo))
{
throw Error.SetComObjectDataFailed();
}
return comObjectInfo;
}
}
// Expression that unwraps ComObject
internal static MemberExpression RcwFromComObject(Expression comObject)
{
Debug.Assert(comObject != null && (typeof(ComObject).IsAssignableFrom(comObject.Type) || comObject.Type == typeof(object)), "must be ComObject");
return Expression.Property(
Helpers.Convert(comObject, typeof(ComObject)),
typeof(ComObject).GetProperty(nameof(RuntimeCallableWrapper), BindingFlags.NonPublic | BindingFlags.Instance)
);
}
// Expression that finds or creates a ComObject that corresponds to given Rcw
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
internal static MethodCallExpression RcwToComObject(Expression rcw)
{
return Expression.Call(
typeof(ComObject).GetMethod(nameof(ObjectToComObject)),
Helpers.Convert(rcw, typeof(object))
);
}
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
private static ComObject CreateComObject(object rcw)
{
if (rcw is IDispatch dispatchObject)
{
// We can do method invocations on IDispatch objects
return new IDispatchComObject(dispatchObject);
}
// There is not much we can do in this case
return new ComObject(rcw);
}
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
internal virtual IList<string> GetMemberNames(bool dataOnly)
{
return Array.Empty<string>();
}
[RequiresUnreferencedCode(Binder.TrimmerWarning)]
internal virtual IList<KeyValuePair<string, object>> GetMembers(IEnumerable<string> names)
{
return Array.Empty<KeyValuePair<string, object>>();
}
DynamicMetaObject IDynamicMetaObjectProvider.GetMetaObject(Expression parameter)
{
return new ComFallbackMetaObject(parameter, BindingRestrictions.Empty, this);
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.IO.Ports/src/System/IO/Ports/SerialStream.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Collections;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.IO.Ports;
using System.Net.Sockets;
namespace System.IO.Ports
{
#pragma warning disable CA1844
internal sealed partial class SerialStream : Stream
#pragma warning restore CA1844
{
private const int MaxDataBits = 8;
private const int MinDataBits = 5;
// members supporting properties exposed to SerialPort
private readonly string _portName;
private bool _inBreak;
private Handshake _handshake;
#pragma warning disable CS0067 // Events shared by Windows and Linux, on Linux we currently never call them
// called when any runtime error occurs on the port (frame, overrun, parity, etc.)
internal event SerialErrorReceivedEventHandler ErrorReceived;
#pragma warning restore CS0067
// ----SECTION: inherited properties from Stream class ------------*
        // These six properties are required for SerialStream to inherit from the abstract Stream class.
// Note four of them are always true or false, and two of them throw exceptions, so these
// are not usefully queried by applications which know they have a SerialStream, etc...
public override bool CanRead
{
get { return (_handle != null); }
}
public override bool CanSeek
{
get { return false; }
}
public override bool CanTimeout
{
get { return (_handle != null); }
}
public override bool CanWrite
{
get { return (_handle != null); }
}
public override long Length
{
get { throw new NotSupportedException(SR.NotSupported_UnseekableStream); }
}
public override long Position
{
get { throw new NotSupportedException(SR.NotSupported_UnseekableStream); }
set { throw new NotSupportedException(SR.NotSupported_UnseekableStream); }
}
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException(SR.NotSupported_UnseekableStream);
}
public override void SetLength(long value)
{
throw new NotSupportedException(SR.NotSupported_UnseekableStream);
}
public override int ReadByte()
{
return ReadByte(ReadTimeout);
}
public override void Write(byte[] array, int offset, int count)
{
Write(array, offset, count, WriteTimeout);
}
~SerialStream()
{
Dispose(false);
}
private void CheckArrayArguments(byte[] array!!, int offset, int count)
{
if (offset < 0)
throw new ArgumentOutOfRangeException(nameof(offset), SR.ArgumentOutOfRange_NeedNonNegNumRequired);
if (count < 0)
throw new ArgumentOutOfRangeException(nameof(count), SR.ArgumentOutOfRange_NeedNonNegNumRequired);
if (array.Length - offset < count)
throw new ArgumentException(SR.Argument_InvalidOffLen);
}
private void CheckHandle()
{
if (_handle == null)
InternalResources.FileNotOpen();
}
private void CheckReadWriteArguments(byte[] array, int offset, int count)
{
CheckArrayArguments(array, offset, count);
CheckHandle();
}
private void CheckWriteArguments()
{
if (_inBreak)
throw new InvalidOperationException(SR.In_Break_State);
CheckHandle();
}
private void CheckWriteArguments(byte[] array, int offset, int count)
{
if (_inBreak)
throw new InvalidOperationException(SR.In_Break_State);
CheckReadWriteArguments(array, offset, count);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Collections;
using System.Diagnostics;
using System.Runtime.InteropServices;
using System.IO.Ports;
using System.Net.Sockets;
namespace System.IO.Ports
{
#pragma warning disable CA1844
internal sealed partial class SerialStream : Stream
#pragma warning restore CA1844
{
private const int MaxDataBits = 8;
private const int MinDataBits = 5;
// members supporting properties exposed to SerialPort
private readonly string _portName;
private bool _inBreak;
private Handshake _handshake;
#pragma warning disable CS0067 // Events shared by Windows and Linux, on Linux we currently never call them
// called when any runtime error occurs on the port (frame, overrun, parity, etc.)
internal event SerialErrorReceivedEventHandler ErrorReceived;
#pragma warning restore CS0067
// ----SECTION: inherited properties from Stream class ------------*
        // These six properties are required for SerialStream to inherit from the abstract Stream class.
// Note four of them are always true or false, and two of them throw exceptions, so these
// are not usefully queried by applications which know they have a SerialStream, etc...
public override bool CanRead
{
get { return (_handle != null); }
}
public override bool CanSeek
{
get { return false; }
}
public override bool CanTimeout
{
get { return (_handle != null); }
}
public override bool CanWrite
{
get { return (_handle != null); }
}
public override long Length
{
get { throw new NotSupportedException(SR.NotSupported_UnseekableStream); }
}
public override long Position
{
get { throw new NotSupportedException(SR.NotSupported_UnseekableStream); }
set { throw new NotSupportedException(SR.NotSupported_UnseekableStream); }
}
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException(SR.NotSupported_UnseekableStream);
}
public override void SetLength(long value)
{
throw new NotSupportedException(SR.NotSupported_UnseekableStream);
}
public override int ReadByte()
{
return ReadByte(ReadTimeout);
}
public override void Write(byte[] array, int offset, int count)
{
Write(array, offset, count, WriteTimeout);
}
~SerialStream()
{
Dispose(false);
}
private void CheckArrayArguments(byte[] array!!, int offset, int count)
{
if (offset < 0)
throw new ArgumentOutOfRangeException(nameof(offset), SR.ArgumentOutOfRange_NeedNonNegNumRequired);
if (count < 0)
throw new ArgumentOutOfRangeException(nameof(count), SR.ArgumentOutOfRange_NeedNonNegNumRequired);
if (array.Length - offset < count)
throw new ArgumentException(SR.Argument_InvalidOffLen);
}
private void CheckHandle()
{
if (_handle == null)
InternalResources.FileNotOpen();
}
private void CheckReadWriteArguments(byte[] array, int offset, int count)
{
CheckArrayArguments(array, offset, count);
CheckHandle();
}
private void CheckWriteArguments()
{
if (_inBreak)
throw new InvalidOperationException(SR.In_Break_State);
CheckHandle();
}
private void CheckWriteArguments(byte[] array, int offset, int count)
{
if (_inBreak)
throw new InvalidOperationException(SR.In_Break_State);
CheckReadWriteArguments(array, offset, count);
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/X86/Avx1/HorizontalSubtract.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics.X86;
using System.Runtime.Intrinsics;
namespace IntelHardwareIntrinsicTest
{
class Program
{
const int Pass = 100;
const int Fail = 0;
static unsafe int Main(string[] args)
{
int testResult = Pass;
if (Avx.IsSupported)
{
using (TestTable<float> floatTable = new TestTable<float>(new float[8] {22, -1, -50, 0, 22, -1, -50, 0 }, new float[8] { 22, -1, -50, 0, 22, -1, -50, 0 }, new float[8]))
using (TestTable<double> doubleTable = new TestTable<double>(new double[4] { 1, -5, 100, 0 }, new double[4] { 22, -1, -50, 0 }, new double[4]))
{
var vf1 = Unsafe.Read<Vector256<float>>(floatTable.inArray1Ptr);
var vf2 = Unsafe.Read<Vector256<float>>(floatTable.inArray2Ptr);
var vf3 = Avx.HorizontalSubtract(vf1, vf2);
Unsafe.Write(floatTable.outArrayPtr, vf3);
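                    // Note (added): the expected layout checked below mirrors AVX horizontal-subtract
                    // semantics: within each 128-bit lane, pairwise differences of the left operand fill
                    // the lower half of the lane and pairwise differences of the right operand the upper half.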
if (!floatTable.CheckResult((left, right, result) =>
(left[0] - left[1] == result[0]) && (right[0] - right[1] == result[2]) &&
(left[2] - left[3] == result[1]) && (right[2] - right[3] == result[3]) &&
(left[4] - left[5] == result[4]) && (right[4] - right[5] == result[6]) &&
(left[6] - left[7] == result[5]) && (right[6] - right[7] == result[7])))
{
Console.WriteLine("Avx HorizontalAdd failed on float:");
foreach (var item in floatTable.outArray)
{
Console.Write(item + ", ");
}
Console.WriteLine();
testResult = Fail;
}
var vd1 = Unsafe.Read<Vector256<double>>(doubleTable.inArray1Ptr);
var vd2 = Unsafe.Read<Vector256<double>>(doubleTable.inArray2Ptr);
var vd3 = Avx.HorizontalSubtract(vd1, vd2);
Unsafe.Write(doubleTable.outArrayPtr, vd3);
if (!doubleTable.CheckResult((left, right, result) =>
(left[0] - left[1] == result[0]) && (right[0] - right[1] == result[1]) &&
(left[2] - left[3] == result[2]) && (right[2] - right[3] == result[3])))
{
Console.WriteLine("Avx HorizontalAdd failed on double:");
foreach (var item in doubleTable.outArray)
{
Console.Write(item + ", ");
}
Console.WriteLine();
testResult = Fail;
}
}
}
return testResult;
}
public unsafe struct TestTable<T> : IDisposable where T : struct
{
public T[] inArray1;
public T[] inArray2;
public T[] outArray;
public void* inArray1Ptr => inHandle1.AddrOfPinnedObject().ToPointer();
public void* inArray2Ptr => inHandle2.AddrOfPinnedObject().ToPointer();
public void* outArrayPtr => outHandle.AddrOfPinnedObject().ToPointer();
GCHandle inHandle1;
GCHandle inHandle2;
GCHandle outHandle;
public TestTable(T[] a, T[] b, T[] c)
{
this.inArray1 = a;
this.inArray2 = b;
this.outArray = c;
inHandle1 = GCHandle.Alloc(inArray1, GCHandleType.Pinned);
inHandle2 = GCHandle.Alloc(inArray2, GCHandleType.Pinned);
outHandle = GCHandle.Alloc(outArray, GCHandleType.Pinned);
}
public bool CheckResult(Func<T[], T[], T[], bool> check)
{
return check(inArray1, inArray2, outArray);
}
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics.X86;
using System.Runtime.Intrinsics;
namespace IntelHardwareIntrinsicTest
{
class Program
{
const int Pass = 100;
const int Fail = 0;
static unsafe int Main(string[] args)
{
int testResult = Pass;
if (Avx.IsSupported)
{
using (TestTable<float> floatTable = new TestTable<float>(new float[8] {22, -1, -50, 0, 22, -1, -50, 0 }, new float[8] { 22, -1, -50, 0, 22, -1, -50, 0 }, new float[8]))
using (TestTable<double> doubleTable = new TestTable<double>(new double[4] { 1, -5, 100, 0 }, new double[4] { 22, -1, -50, 0 }, new double[4]))
{
var vf1 = Unsafe.Read<Vector256<float>>(floatTable.inArray1Ptr);
var vf2 = Unsafe.Read<Vector256<float>>(floatTable.inArray2Ptr);
var vf3 = Avx.HorizontalSubtract(vf1, vf2);
Unsafe.Write(floatTable.outArrayPtr, vf3);
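                    // Note (added): the expected layout checked below mirrors AVX horizontal-subtract
                    // semantics: within each 128-bit lane, pairwise differences of the left operand fill
                    // the lower half of the lane and pairwise differences of the right operand the upper half.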
if (!floatTable.CheckResult((left, right, result) =>
(left[0] - left[1] == result[0]) && (right[0] - right[1] == result[2]) &&
(left[2] - left[3] == result[1]) && (right[2] - right[3] == result[3]) &&
(left[4] - left[5] == result[4]) && (right[4] - right[5] == result[6]) &&
(left[6] - left[7] == result[5]) && (right[6] - right[7] == result[7])))
{
Console.WriteLine("Avx HorizontalAdd failed on float:");
foreach (var item in floatTable.outArray)
{
Console.Write(item + ", ");
}
Console.WriteLine();
testResult = Fail;
}
var vd1 = Unsafe.Read<Vector256<double>>(doubleTable.inArray1Ptr);
var vd2 = Unsafe.Read<Vector256<double>>(doubleTable.inArray2Ptr);
var vd3 = Avx.HorizontalSubtract(vd1, vd2);
Unsafe.Write(doubleTable.outArrayPtr, vd3);
if (!doubleTable.CheckResult((left, right, result) =>
(left[0] - left[1] == result[0]) && (right[0] - right[1] == result[1]) &&
(left[2] - left[3] == result[2]) && (right[2] - right[3] == result[3])))
{
Console.WriteLine("Avx HorizontalAdd failed on double:");
foreach (var item in doubleTable.outArray)
{
Console.Write(item + ", ");
}
Console.WriteLine();
testResult = Fail;
}
}
}
return testResult;
}
public unsafe struct TestTable<T> : IDisposable where T : struct
{
public T[] inArray1;
public T[] inArray2;
public T[] outArray;
public void* inArray1Ptr => inHandle1.AddrOfPinnedObject().ToPointer();
public void* inArray2Ptr => inHandle2.AddrOfPinnedObject().ToPointer();
public void* outArrayPtr => outHandle.AddrOfPinnedObject().ToPointer();
GCHandle inHandle1;
GCHandle inHandle2;
GCHandle outHandle;
public TestTable(T[] a, T[] b, T[] c)
{
this.inArray1 = a;
this.inArray2 = b;
this.outArray = c;
inHandle1 = GCHandle.Alloc(inArray1, GCHandleType.Pinned);
inHandle2 = GCHandle.Alloc(inArray2, GCHandleType.Pinned);
outHandle = GCHandle.Alloc(outArray, GCHandleType.Pinned);
}
public bool CheckResult(Func<T[], T[], T[], bool> check)
{
return check(inArray1, inArray2, outArray);
}
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Memory/tests/Memory/GetHashCode.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.MemoryTests
{
public static partial class MemoryTests
{
[Fact]
public static void SameObjectsHaveSameHashCodes()
{
int[] a = { 91, 92, 93, 94, 95 };
var left = new Memory<int>(a, 2, 3);
var right = new Memory<int>(a, 2, 3);
int[] b = { 1, 2, 3, 4, 5 };
var different = new Memory<int>(b, 2, 3);
Assert.Equal(left.GetHashCode(), right.GetHashCode());
Assert.NotEqual(left.GetHashCode(), different.GetHashCode());
}
[Fact]
public static void HashCodeIncludesLength()
{
int[] a = { 91, 92, 93, 94, 95 };
var left = new Memory<int>(a, 2, 1);
var right = new Memory<int>(a, 2, 3);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void HashCodeIncludesBase()
{
int[] a = { 91, 92, 93, 94, 95 };
var left = new Memory<int>(a, 1, 3);
var right = new Memory<int>(a, 2, 3);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void HashCodesDifferentForSameContent()
{
var left = new Memory<int>(new int[] { 0, 1, 2 }, 1, 1);
var right = new Memory<int>(new int[] { 0, 1, 2 }, 1, 1);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void EmptyMemoryHashCodeNotUnified()
{
var left = new Memory<int>(new int[0]);
var right = new Memory<int>(new int[0]);
Memory<int> memoryFromNonEmptyArrayButWithZeroLength = new Memory<int>(new int[1] { 123 }).Slice(0, 0);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
Assert.NotEqual(left.GetHashCode(), memoryFromNonEmptyArrayButWithZeroLength.GetHashCode());
Assert.NotEqual(right.GetHashCode(), memoryFromNonEmptyArrayButWithZeroLength.GetHashCode());
            // The Memory<int>.Empty property, by contrast, yields instances whose hash codes are equal
left = Memory<int>.Empty;
right = Memory<int>.Empty;
Assert.Equal(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void HashCodesForImplicitCastsAreEqual()
{
byte[] bytes = new byte[10];
var memory = new Memory<byte>(bytes);
var readOnlyMemory = new ReadOnlyMemory<byte>(bytes);
ReadOnlyMemory<byte> implicitReadOnlyMemory = memory;
Memory<byte> implicitMemoryArray = bytes;
ReadOnlyMemory<byte> implicitReadOnlyMemoryArray = bytes;
Assert.Equal(readOnlyMemory.GetHashCode(), memory.GetHashCode());
Assert.Equal(implicitReadOnlyMemory.GetHashCode(), memory.GetHashCode());
Assert.Equal(implicitMemoryArray.GetHashCode(), memory.GetHashCode());
Assert.Equal(implicitReadOnlyMemoryArray.GetHashCode(), memory.GetHashCode());
}
[Fact]
public static void DefaultMemoryHashCode()
{
Memory<int> memory = default;
Assert.Equal(0, memory.GetHashCode());
Memory<int> memory2 = default;
Assert.Equal(memory2.GetHashCode(), memory.GetHashCode());
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.MemoryTests
{
public static partial class MemoryTests
{
[Fact]
public static void SameObjectsHaveSameHashCodes()
{
int[] a = { 91, 92, 93, 94, 95 };
var left = new Memory<int>(a, 2, 3);
var right = new Memory<int>(a, 2, 3);
int[] b = { 1, 2, 3, 4, 5 };
var different = new Memory<int>(b, 2, 3);
Assert.Equal(left.GetHashCode(), right.GetHashCode());
Assert.NotEqual(left.GetHashCode(), different.GetHashCode());
}
[Fact]
public static void HashCodeIncludesLength()
{
int[] a = { 91, 92, 93, 94, 95 };
var left = new Memory<int>(a, 2, 1);
var right = new Memory<int>(a, 2, 3);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void HashCodeIncludesBase()
{
int[] a = { 91, 92, 93, 94, 95 };
var left = new Memory<int>(a, 1, 3);
var right = new Memory<int>(a, 2, 3);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void HashCodesDifferentForSameContent()
{
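            // Note (added): Memory<T> hashing is identity-based (the earlier tests show it also folds in
            // index and length), so equal contents backed by different arrays are expected to hash differently.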
var left = new Memory<int>(new int[] { 0, 1, 2 }, 1, 1);
var right = new Memory<int>(new int[] { 0, 1, 2 }, 1, 1);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void EmptyMemoryHashCodeNotUnified()
{
var left = new Memory<int>(new int[0]);
var right = new Memory<int>(new int[0]);
Memory<int> memoryFromNonEmptyArrayButWithZeroLength = new Memory<int>(new int[1] { 123 }).Slice(0, 0);
Assert.NotEqual(left.GetHashCode(), right.GetHashCode());
Assert.NotEqual(left.GetHashCode(), memoryFromNonEmptyArrayButWithZeroLength.GetHashCode());
Assert.NotEqual(right.GetHashCode(), memoryFromNonEmptyArrayButWithZeroLength.GetHashCode());
            // The Memory<int>.Empty property, by contrast, yields instances whose hash codes are equal
left = Memory<int>.Empty;
right = Memory<int>.Empty;
Assert.Equal(left.GetHashCode(), right.GetHashCode());
}
[Fact]
public static void HashCodesForImplicitCastsAreEqual()
{
byte[] bytes = new byte[10];
var memory = new Memory<byte>(bytes);
var readOnlyMemory = new ReadOnlyMemory<byte>(bytes);
ReadOnlyMemory<byte> implicitReadOnlyMemory = memory;
Memory<byte> implicitMemoryArray = bytes;
ReadOnlyMemory<byte> implicitReadOnlyMemoryArray = bytes;
Assert.Equal(readOnlyMemory.GetHashCode(), memory.GetHashCode());
Assert.Equal(implicitReadOnlyMemory.GetHashCode(), memory.GetHashCode());
Assert.Equal(implicitMemoryArray.GetHashCode(), memory.GetHashCode());
Assert.Equal(implicitReadOnlyMemoryArray.GetHashCode(), memory.GetHashCode());
}
[Fact]
public static void DefaultMemoryHashCode()
{
Memory<int> memory = default;
Assert.Equal(0, memory.GetHashCode());
Memory<int> memory2 = default;
Assert.Equal(memory2.GetHashCode(), memory.GetHashCode());
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/coreclr/tools/Common/TypeSystem/Interop/IL/NativeStructType.Sorting.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Internal.TypeSystem.Interop
{
    // Functionality related to deterministic ordering of types
partial class NativeStructType
{
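        // Note (added): ClassCode is an arbitrary but stable per-type constant; the type system comparer
        // orders entities of different kinds by it and only falls back to CompareToImpl within the same kind.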
protected override int ClassCode => -377751537;
protected override int CompareToImpl(TypeDesc other, TypeSystemComparer comparer)
{
return comparer.Compare(ManagedStructType, ((NativeStructType)other).ManagedStructType);
}
partial class NativeStructField
{
protected override int ClassCode => 1580219745;
protected override int CompareToImpl(FieldDesc other, TypeSystemComparer comparer)
{
return comparer.Compare(_managedField, ((NativeStructField)other)._managedField);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Internal.TypeSystem.Interop
{
    // Functionality related to deterministic ordering of types
partial class NativeStructType
{
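        // Note (added): ClassCode is an arbitrary but stable per-type constant; the type system comparer
        // orders entities of different kinds by it and only falls back to CompareToImpl within the same kind.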
protected override int ClassCode => -377751537;
protected override int CompareToImpl(TypeDesc other, TypeSystemComparer comparer)
{
return comparer.Compare(ManagedStructType, ((NativeStructType)other).ManagedStructType);
}
partial class NativeStructField
{
protected override int ClassCode => 1580219745;
protected override int CompareToImpl(FieldDesc other, TypeSystemComparer comparer)
{
return comparer.Compare(_managedField, ((NativeStructField)other)._managedField);
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/General/Vector256/ConvertToDouble.UInt64.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void ConvertToDoubleUInt64()
{
var test = new VectorUnaryOpTest__ConvertToDoubleUInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorUnaryOpTest__ConvertToDoubleUInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt64[] inArray1, Double[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<UInt64> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
return testStruct;
}
public void RunStructFldScenario(VectorUnaryOpTest__ConvertToDoubleUInt64 testClass)
{
var result = Vector256.ConvertToDouble(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt64>>() / sizeof(UInt64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double);
private static UInt64[] _data1 = new UInt64[Op1ElementCount];
private static Vector256<UInt64> _clsVar1;
private Vector256<UInt64> _fld1;
private DataTable _dataTable;
static VectorUnaryOpTest__ConvertToDoubleUInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _clsVar1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
}
public VectorUnaryOpTest__ConvertToDoubleUInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
_dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector256.ConvertToDouble(
Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector256).GetMethod(nameof(Vector256.ConvertToDouble), new Type[] {
typeof(Vector256<UInt64>)
});
if (method is null)
{
method = typeof(Vector256).GetMethod(nameof(Vector256.ConvertToDouble), 1, new Type[] {
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Double));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Double>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector256.ConvertToDouble(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr);
var result = Vector256.ConvertToDouble(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorUnaryOpTest__ConvertToDoubleUInt64();
var result = Vector256.ConvertToDouble(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector256.ConvertToDouble(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector256.ConvertToDouble(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<UInt64> op1, void* result, [CallerMemberName] string method = "")
{
UInt64[] inArray1 = new UInt64[Op1ElementCount];
Double[] outArray = new Double[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
UInt64[] inArray1 = new UInt64[Op1ElementCount];
Double[] outArray = new Double[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(UInt64[] firstOp, Double[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (double)(firstOp[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (double)(firstOp[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.ConvertToDouble)}<Double>(Vector256<UInt64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void ConvertToDoubleUInt64()
{
var test = new VectorUnaryOpTest__ConvertToDoubleUInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorUnaryOpTest__ConvertToDoubleUInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt64[] inArray1, Double[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Double>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
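// Align rounds the pinned buffer pointer up to the next multiple of 'expectedAlignment'
// using the power-of-two trick (ptr + align - 1) & ~(align - 1); the backing arrays are
// allocated at twice the alignment, so the rounded pointer always stays inside the buffer.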
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector256<UInt64> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
return testStruct;
}
public void RunStructFldScenario(VectorUnaryOpTest__ConvertToDoubleUInt64 testClass)
{
var result = Vector256.ConvertToDouble(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt64>>() / sizeof(UInt64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<Double>>() / sizeof(Double);
private static UInt64[] _data1 = new UInt64[Op1ElementCount];
private static Vector256<UInt64> _clsVar1;
private Vector256<UInt64> _fld1;
private DataTable _dataTable;
static VectorUnaryOpTest__ConvertToDoubleUInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _clsVar1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
}
public VectorUnaryOpTest__ConvertToDoubleUInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt64>, byte>(ref _fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = Math.Min(long.MaxValue, TestLibrary.Generator.GetUInt64()); }
_dataTable = new DataTable(_data1, new Double[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector256.ConvertToDouble(
Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector256).GetMethod(nameof(Vector256.ConvertToDouble), new Type[] {
typeof(Vector256<UInt64>)
});
if (method is null)
{
method = typeof(Vector256).GetMethod(nameof(Vector256.ConvertToDouble), 1, new Type[] {
typeof(Vector256<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Double));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector256<Double>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector256.ConvertToDouble(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector256<UInt64>>(_dataTable.inArray1Ptr);
var result = Vector256.ConvertToDouble(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorUnaryOpTest__ConvertToDoubleUInt64();
var result = Vector256.ConvertToDouble(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector256.ConvertToDouble(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector256.ConvertToDouble(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector256<UInt64> op1, void* result, [CallerMemberName] string method = "")
{
UInt64[] inArray1 = new UInt64[Op1ElementCount];
Double[] outArray = new Double[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
UInt64[] inArray1 = new UInt64[Op1ElementCount];
Double[] outArray = new Double[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<Double>>());
ValidateResult(inArray1, outArray, method);
}
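// Element-wise oracle shared by the overloads above: each result lane must equal the
// C# scalar conversion (double)firstOp[i].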
private void ValidateResult(UInt64[] firstOp, Double[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (double)(firstOp[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (double)(firstOp[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector256)}.{nameof(Vector256.ConvertToDouble)}<Double>(Vector256<UInt64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Speech/src/Recognition/UpdateEventArgs.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Speech.Recognition
{
// Event args used in the RecognizerUpdateReached event, which is raised after a call is made to RequestRecognizerUpdate.
public class RecognizerUpdateReachedEventArgs : EventArgs
{
#region Constructors
internal RecognizerUpdateReachedEventArgs(object userToken, TimeSpan audioPosition)
{
_userToken = userToken;
_audioPosition = audioPosition;
}
#endregion
#region Public Properties
// Application supplied object reference.
public object UserToken
{
get { return _userToken; }
}
public TimeSpan AudioPosition
{
get { return _audioPosition; }
}
#endregion
#region Private Fields
private object _userToken;
private TimeSpan _audioPosition;
#endregion
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Speech.Recognition
{
// Event args used in the RecognizerUpdateReached event, which is raised after a call is made to RequestRecognizerUpdate.
public class RecognizerUpdateReachedEventArgs : EventArgs
{
#region Constructors
internal RecognizerUpdateReachedEventArgs(object userToken, TimeSpan audioPosition)
{
_userToken = userToken;
_audioPosition = audioPosition;
}
#endregion
#region Public Properties
// Application supplied object reference.
public object UserToken
{
get { return _userToken; }
}
public TimeSpan AudioPosition
{
get { return _audioPosition; }
}
#endregion
#region Private Fields
private object _userToken;
private TimeSpan _audioPosition;
#endregion
}
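// Illustrative usage sketch (hypothetical names; assumes a SpeechRecognitionEngine 'recognizer'
// and a Grammar 'grammar' already exist):
//
//   recognizer.RecognizerUpdateReached += (s, e) =>
//   {
//       recognizer.LoadGrammar(grammar);              // safe point to change recognizer state
//       Console.WriteLine($"Update at {e.AudioPosition}, token: {e.UserToken}");
//   };
//   recognizer.RequestRecognizerUpdate("reload-grammars");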
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/General/Vector128/LessThanAny.Int32.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void LessThanAnyInt32()
{
var test = new VectorBooleanBinaryOpTest__LessThanAnyInt32();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__LessThanAnyInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Int32[] inArray1, Int32[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector128<Int32> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__LessThanAnyInt32 testClass)
{
var result = Vector128.LessThanAny(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector128<Int32> _clsVar2;
private Vector128<Int32> _fld1;
private Vector128<Int32> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__LessThanAnyInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public VectorBooleanBinaryOpTest__LessThanAnyInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector128.LessThanAny(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector128).GetMethod(nameof(Vector128.LessThanAny), new Type[] {
typeof(Vector128<Int32>),
typeof(Vector128<Int32>)
});
if (method is null)
{
method = typeof(Vector128).GetMethod(nameof(Vector128.LessThanAny), 1, new Type[] {
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int32));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector128.LessThanAny(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var result = Vector128.LessThanAny(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__LessThanAnyInt32();
var result = Vector128.LessThanAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector128.LessThanAny(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector128.LessThanAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, bool result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Int32[] left, Int32[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = false;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult |= (left[i] < right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.LessThanAny)}<Int32>(Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void LessThanAnyInt32()
{
var test = new VectorBooleanBinaryOpTest__LessThanAnyInt32();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__LessThanAnyInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Int32[] inArray1, Int32[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector128<Int32> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__LessThanAnyInt32 testClass)
{
var result = Vector128.LessThanAny(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector128<Int32> _clsVar2;
private Vector128<Int32> _fld1;
private Vector128<Int32> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__LessThanAnyInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public VectorBooleanBinaryOpTest__LessThanAnyInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector128.LessThanAny(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector128).GetMethod(nameof(Vector128.LessThanAny), new Type[] {
typeof(Vector128<Int32>),
typeof(Vector128<Int32>)
});
if (method is null)
{
method = typeof(Vector128).GetMethod(nameof(Vector128.LessThanAny), 1, new Type[] {
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int32));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector128.LessThanAny(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var result = Vector128.LessThanAny(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__LessThanAnyInt32();
var result = Vector128.LessThanAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector128.LessThanAny(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector128.LessThanAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, bool result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, result, method);
}
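// Scalar oracle: LessThanAny should report true when at least one element-wise comparison
// left[i] < right[i] holds, i.e. the OR-reduction computed by the loop below.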
private void ValidateResult(Int32[] left, Int32[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = false;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult |= (left[i] < right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.LessThanAny)}<Int32>(Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/Common/tests/System/Security/Cryptography/AlgorithmImplementations/ECDiffieHellman/ECDhKeyFileTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security.Cryptography.Tests;
using Xunit;
namespace System.Security.Cryptography.EcDiffieHellman.Tests
{
[SkipOnPlatform(TestPlatforms.Browser, "Not supported on Browser")]
[ActiveIssue("https://github.com/dotnet/runtime/issues/64389", TestPlatforms.Windows)]
public class ECDhKeyFileTests : ECKeyFileTests<ECDiffieHellman>
{
protected override ECDiffieHellman CreateKey() => ECDiffieHellmanFactory.Create();
protected override void Exercise(ECDiffieHellman key) => key.Exercise();
protected override Func<ECDiffieHellman, byte[]> PublicKeyWriteArrayFunc { get; } =
key => key.PublicKey.ExportSubjectPublicKeyInfo();
protected override WriteKeyToSpanFunc PublicKeyWriteSpanFunc { get; } =
(ECDiffieHellman key, Span<byte> destination, out int bytesWritten) =>
key.PublicKey.TryExportSubjectPublicKeyInfo(destination, out bytesWritten);
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security.Cryptography.Tests;
using Xunit;
namespace System.Security.Cryptography.EcDiffieHellman.Tests
{
[SkipOnPlatform(TestPlatforms.Browser, "Not supported on Browser")]
[ActiveIssue("https://github.com/dotnet/runtime/issues/64389", TestPlatforms.Windows)]
public class ECDhKeyFileTests : ECKeyFileTests<ECDiffieHellman>
{
protected override ECDiffieHellman CreateKey() => ECDiffieHellmanFactory.Create();
protected override void Exercise(ECDiffieHellman key) => key.Exercise();
protected override Func<ECDiffieHellman, byte[]> PublicKeyWriteArrayFunc { get; } =
key => key.PublicKey.ExportSubjectPublicKeyInfo();
protected override WriteKeyToSpanFunc PublicKeyWriteSpanFunc { get; } =
(ECDiffieHellman key, Span<byte> destination, out int bytesWritten) =>
key.PublicKey.TryExportSubjectPublicKeyInfo(destination, out bytesWritten);
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/General/Vector64_1/AllBitsSet.SByte.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void AllBitsSetSByte()
{
var test = new VectorAllBitsSet__AllBitsSetSByte();
// Validates basic functionality works
test.RunBasicScenario();
// Validates calling via reflection works
test.RunReflectionScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorAllBitsSet__AllBitsSetSByte
{
private static readonly int LargestVectorSize = 8;
private static readonly int ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte);
public bool Succeeded { get; set; } = true;
public void RunBasicScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario));
Vector64<SByte> result = Vector64<SByte>.AllBitsSet;
ValidateResult(result);
}
public void RunReflectionScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario));
object result = typeof(Vector64<SByte>)
.GetProperty(nameof(Vector64<SByte>.AllBitsSet), new Type[] { })
.GetGetMethod()
.Invoke(null, new object[] { });
ValidateResult((Vector64<SByte>)(result));
}
private void ValidateResult(Vector64<SByte> result, [CallerMemberName] string method = "")
{
SByte[] resultElements = new SByte[ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref resultElements[0]), result);
ValidateResult(resultElements, method);
}
private unsafe void ValidateResult(SByte[] resultElements, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < ElementCount; i++)
{
if (!HasAllBitsSet(resultElements[i]))
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"Vector64.AllBitsSet(SByte): {method} failed:");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
private unsafe bool HasAllBitsSet(SByte value)
{
for (int i = 0; i < sizeof(SByte); i++)
{
if (((byte*)&value)[i] != 0xFF)
return false;
}
return true;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void AllBitsSetSByte()
{
var test = new VectorAllBitsSet__AllBitsSetSByte();
// Validates basic functionality works
test.RunBasicScenario();
// Validates calling via reflection works
test.RunReflectionScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorAllBitsSet__AllBitsSetSByte
{
private static readonly int LargestVectorSize = 8;
private static readonly int ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte);
public bool Succeeded { get; set; } = true;
public void RunBasicScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario));
Vector64<SByte> result = Vector64<SByte>.AllBitsSet;
ValidateResult(result);
}
public void RunReflectionScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario));
object result = typeof(Vector64<SByte>)
.GetProperty(nameof(Vector64<SByte>.AllBitsSet), new Type[] { })
.GetGetMethod()
.Invoke(null, new object[] { });
ValidateResult((Vector64<SByte>)(result));
}
private void ValidateResult(Vector64<SByte> result, [CallerMemberName] string method = "")
{
SByte[] resultElements = new SByte[ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref resultElements[0]), result);
ValidateResult(resultElements, method);
}
private unsafe void ValidateResult(SByte[] resultElements, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < ElementCount; i++)
{
if (!HasAllBitsSet(resultElements[i]))
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"Vector64.AllBitsSet(SByte): {method} failed:");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
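// For SByte, AllBitsSet is the two's-complement value -1 (every byte 0xFF); the helper below
// inspects the raw bytes rather than comparing against -1, which keeps the check independent
// of how the element type interprets the bit pattern.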
private unsafe bool HasAllBitsSet(SByte value)
{
for (int i = 0; i < sizeof(SByte); i++)
{
if (((byte*)&value)[i] != 0xFF)
return false;
}
return true;
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/Common/src/Interop/Windows/Kernel32/Interop.SetPriorityClass.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Runtime.InteropServices;
internal static partial class Interop
{
internal static partial class Kernel32
{
[GeneratedDllImport(Libraries.Kernel32, SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
internal static partial bool SetPriorityClass(SafeProcessHandle handle, int priorityClass);
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Win32.SafeHandles;
using System.Runtime.InteropServices;
internal static partial class Interop
{
internal static partial class Kernel32
{
[GeneratedDllImport(Libraries.Kernel32, SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
internal static partial bool SetPriorityClass(SafeProcessHandle handle, int priorityClass);
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/Methodical/eh/nested/general/throwinfinallynestedintry.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// Throw from a try block nested in a finally which is in turn nested in a try block
using System;
public class a
{
private static TestUtil.TestLog testLog;
static a()
{
// Create test writer object to hold expected output
System.IO.StringWriter expectedOut = new System.IO.StringWriter();
// Write expected output to string writer object
expectedOut.WriteLine("In outer try - 0");
expectedOut.WriteLine("In outer try - 1");
expectedOut.WriteLine("In outer finally - 1");
expectedOut.WriteLine("In inner try");
expectedOut.WriteLine("In inner finally");
expectedOut.WriteLine("In outer finally - 0");
expectedOut.WriteLine("Pass");
// Create and initialize test log object
testLog = new TestUtil.TestLog(expectedOut);
}
public static void MiddleMethod()
{
try
{
Console.WriteLine("In outer try - 0");
try
{
Console.WriteLine("In outer try - 1");
}
finally
{
Console.WriteLine("In outer finally - 1");
try
{
Console.WriteLine("In inner try");
throw new System.ArgumentException();
Console.WriteLine("Unreached");
}
finally
{
Console.WriteLine("In inner finally");
}
}
}
finally
{
Console.WriteLine("In outer finally - 0");
}
Console.WriteLine("Unreached...");
}
public static int Main()
{
//Start recording
testLog.StartRecording();
try
{
MiddleMethod();
}
catch
{
Console.WriteLine("Pass");
}
// stop recording
testLog.StopRecording();
return testLog.VerifyOutput();
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// Throw from a try block nested in a finally which is in turn nested in a try block
using System;
public class a
{
private static TestUtil.TestLog testLog;
static a()
{
// Create test writer object to hold expected output
System.IO.StringWriter expectedOut = new System.IO.StringWriter();
// Write expected output to string writer object
expectedOut.WriteLine("In outer try - 0");
expectedOut.WriteLine("In outer try - 1");
expectedOut.WriteLine("In outer finally - 1");
expectedOut.WriteLine("In inner try");
expectedOut.WriteLine("In inner finally");
expectedOut.WriteLine("In outer finally - 0");
expectedOut.WriteLine("Pass");
// Create and initialize test log object
testLog = new TestUtil.TestLog(expectedOut);
}
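// The expected sequence above follows from the exception flow: the ArgumentException thrown
// inside the inner try (which lives inside "outer finally - 1") first runs the inner finally,
// then the enclosing "outer finally - 0", and then escapes MiddleMethod to be caught in Main,
// which prints "Pass".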
public static void MiddleMethod()
{
try
{
Console.WriteLine("In outer try - 0");
try
{
Console.WriteLine("In outer try - 1");
}
finally
{
Console.WriteLine("In outer finally - 1");
try
{
Console.WriteLine("In inner try");
throw new System.ArgumentException();
Console.WriteLine("Unreached");
}
finally
{
Console.WriteLine("In inner finally");
}
}
}
finally
{
Console.WriteLine("In outer finally - 0");
}
Console.WriteLine("Unreached...");
}
public static int Main()
{
//Start recording
testLog.StartRecording();
try
{
MiddleMethod();
}
catch
{
Console.WriteLine("Pass");
}
// stop recording
testLog.StopRecording();
return testLog.VerifyOutput();
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Globalization.Calendars/tests/ThaiBuddhistCalendar/ThaiBuddhistCalendarGetDayOfWeek.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Xunit;
namespace System.Globalization.Tests
{
public class ThaiBuddhistCalendarGetDayOfWeek
{
private static readonly RandomDataGenerator s_randomDataGenerator = new RandomDataGenerator();
public static IEnumerable<object[]> GetDayOfWeek_TestData()
{
yield return new object[] { DateTime.MinValue };
yield return new object[] { DateTime.MaxValue };
yield return new object[] { s_randomDataGenerator.GetDateTime(-55) };
}
[Theory]
[MemberData(nameof(GetDayOfWeek_TestData))]
public void GetDayOfWeek(DateTime time)
{
Assert.Equal(new GregorianCalendar().GetDayOfWeek(time), new ThaiBuddhistCalendar().GetDayOfWeek(time));
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Xunit;
namespace System.Globalization.Tests
{
public class ThaiBuddhistCalendarGetDayOfWeek
{
private static readonly RandomDataGenerator s_randomDataGenerator = new RandomDataGenerator();
public static IEnumerable<object[]> GetDayOfWeek_TestData()
{
yield return new object[] { DateTime.MinValue };
yield return new object[] { DateTime.MaxValue };
yield return new object[] { s_randomDataGenerator.GetDateTime(-55) };
}
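// The Thai Buddhist calendar differs from the Gregorian calendar only in its year numbering
// (Gregorian year + 543), so the day of week for any DateTime must match the GregorianCalendar
// result used as the oracle below.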
[Theory]
[MemberData(nameof(GetDayOfWeek_TestData))]
public void GetDayOfWeek(DateTime time)
{
Assert.Equal(new GregorianCalendar().GetDayOfWeek(time), new ThaiBuddhistCalendar().GetDayOfWeek(time));
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Runtime.Serialization.Xml/tests/SerializationTestTypes/DCRImplVariations.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.Serialization;
using System.Xml;
namespace SerializationTestTypes
{
[DataContract]
public class Person1
{
public object address;
public Person1(string variation)
{
age = 10;
name = "Tintin";
address = new Address("rd", "wa", 90012);
}
public Person1()
{
}
[DataMember]
public int age;
[DataMember]
public string name;
}
[DataContract]
public class Person2 : Person1
{
[DataMember]
public Guid Uid;
[DataMember]
public XmlQualifiedName[] XQAArray;
[DataMember]
public object anyData;
public Person2()
{
Uid = new Guid("ff816178-54df-2ea8-6511-cfeb4d14ab5a");
XQAArray = new XmlQualifiedName[] { new XmlQualifiedName("Name1", "http://www.PlayForFun.com"), new XmlQualifiedName("Name2", "http://www.FunPlay.com") };
anyData = new Kid();
}
}
public class Kid : Person1
{
[DataMember]
public object FavoriteToy;
public Kid()
{
FavoriteToy = new Blocks("Orange");
age = 3;
}
}
[DataContract]
public class Blocks
{
public Blocks(string s)
{
color = s;
}
[DataMember]
public string color;
}
[DataContract]
public class Address
{
public Address()
{
}
public Address(string c, string s, int z)
{
City = c;
State = s;
ZipCode = z;
}
[DataMember]
public string City;
[DataMember]
public string State;
[DataMember]
public int ZipCode;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.Serialization;
using System.Xml;
namespace SerializationTestTypes
{
[DataContract]
public class Person1
{
public object address;
public Person1(string variation)
{
age = 10;
name = "Tintin";
address = new Address("rd", "wa", 90012);
}
public Person1()
{
}
[DataMember]
public int age;
[DataMember]
public string name;
}
[DataContract]
public class Person2 : Person1
{
[DataMember]
public Guid Uid;
[DataMember]
public XmlQualifiedName[] XQAArray;
[DataMember]
public object anyData;
public Person2()
{
Uid = new Guid("ff816178-54df-2ea8-6511-cfeb4d14ab5a");
XQAArray = new XmlQualifiedName[] { new XmlQualifiedName("Name1", "http://www.PlayForFun.com"), new XmlQualifiedName("Name2", "http://www.FunPlay.com") };
anyData = new Kid();
}
}
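// 'anyData' is declared as object but initialized with a Kid, so round-tripping Person2
// requires the serializer to resolve the runtime type (via known types or a
// DataContractResolver); that appears to be the scenario these DCR variation types exercise.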
public class Kid : Person1
{
[DataMember]
public object FavoriteToy;
public Kid()
{
FavoriteToy = new Blocks("Orange");
age = 3;
}
}
[DataContract]
public class Blocks
{
public Blocks(string s)
{
color = s;
}
[DataMember]
public string color;
}
[DataContract]
public class Address
{
public Address()
{
}
public Address(string c, string s, int z)
{
City = c;
State = s;
ZipCode = z;
}
[DataMember]
public string City;
[DataMember]
public string State;
[DataMember]
public int ZipCode;
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Reflection.MetadataLoadContext/src/System/Reflection/TypeLoading/General/TypeExtensions.netstandard.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// This file makes NetStandard Reflection's "subclassing" surface area look as much like NetCore as possible so the rest of the code can be written without #if's.
namespace System.Reflection.TypeLoading
{
// For code that has to interact with "Type" rather than "RoType", some handy extension methods that "add" the NetCore reflection apis to NetStandard.
internal static class NetCoreApiEmulators
{
// On NetStandard, we have to make do with slower emulations.
public static bool IsSignatureType(this Type type) => false;
public static bool IsSZArray(this Type type) => type.IsArray && type.GetArrayRank() == 1 && type.Name.EndsWith("[]", StringComparison.Ordinal);
public static bool IsVariableBoundArray(this Type type) => type.IsArray && !type.IsSZArray();
public static bool IsGenericMethodParameter(this Type type) => type.IsGenericParameter && type.DeclaringMethod != null;
// Signature Types do not exist on NetStandard 2.0 but it's possible we could reach this if a NetCore app uses the NetStandard build of this library.
public static Type MakeSignatureGenericType(this Type genericTypeDefinition, Type[] typeArguments) => throw new NotSupportedException(SR.NotSupported_MakeGenericType_SignatureTypes);
}
/// <summary>
/// Another layer of base types. For NetCore, these base types are all but empty. For NetStandard, these base types add the NetCore apis to NetStandard
/// so code interacting with "RoTypes" and friends can happily code to the full NetCore surface area.
///
/// On NetStandard (and pre-2.2 NetCore), the TypeInfo constructor is not exposed so we cannot derive directly from TypeInfo.
/// But we *can* derive from TypeDelegator which derives from TypeInfo. Since we're overriding (almost) every method,
/// none of TypeDelegator's own methods get called (and the instance field it has for holding the "underlying Type" goes
/// to waste.)
///
/// For future platforms, RoTypeBase's base type should be changed back to TypeInfo. Deriving from TypeDelegator is a hack and
/// causes us to waste an extra pointer-sized field per Type instance. It is also fragile as TypeDelegator could break us in the future
/// by overriding more methods.
/// </summary>
internal abstract class LeveledTypeInfo : TypeDelegator
{
protected LeveledTypeInfo() : base() { }
// This is an api that TypeDelegator overrides that it needn't have. Since RoType expects to fall through to System.Type's method, we have to reimplement
// System.Type's behavior here to avoid getting TypeDelegator's method.
//
// This is an annoying and fragile requirement as we have to do this for any api that (1) RoType declines to override and (2) TypeDelegator does override.
// This could be policed by an analyzer that searches RoType's method bodies for non-virtual calls to apis declared on TypeDelegator.
public override EventInfo[] GetEvents() => GetEvents(BindingFlags.Instance | BindingFlags.Static | BindingFlags.Public);
public abstract bool IsGenericTypeParameter { get; }
public abstract bool IsGenericMethodParameter { get; }
public abstract bool IsSZArray { get; }
public abstract bool IsVariableBoundArray { get; }
public abstract bool IsTypeDefinition { get; }
public abstract bool IsByRefLike { get; }
public virtual bool IsSignatureType => false;
protected abstract MethodInfo GetMethodImpl(string name, int genericParameterCount, BindingFlags bindingAttr, Binder binder, CallingConventions callConvention, Type[] types, ParameterModifier[] modifiers);
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledAssembly : Assembly
{
public abstract Type[] GetForwardedTypes();
}
internal abstract class LeveledConstructorInfo : ConstructorInfo
{
public abstract bool IsConstructedGenericMethod { get; }
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledMethodInfo : MethodInfo
{
public abstract bool IsConstructedGenericMethod { get; }
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledEventInfo : EventInfo
{
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledFieldInfo : FieldInfo
{
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledPropertyInfo : PropertyInfo
{
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledCustomAttributeData : CustomAttributeData
{
// On NetStandard, AttributeType is declared non-virtually so apps are stuck calling the slow version that builds a constructor.
public new abstract Type AttributeType { get; }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// This file makes NetStandard Reflection's "subclassing" surface area look as much like NetCore as possible so the rest of the code can be written without #if's.
namespace System.Reflection.TypeLoading
{
// For code that has to interact with "Type" rather than "RoType", some handy extension methods that "add" the NetCore reflection apis to NetStandard.
internal static class NetCoreApiEmulators
{
// On NetStandard, we have to make do with slower emulations.
public static bool IsSignatureType(this Type type) => false;
public static bool IsSZArray(this Type type) => type.IsArray && type.GetArrayRank() == 1 && type.Name.EndsWith("[]", StringComparison.Ordinal);
public static bool IsVariableBoundArray(this Type type) => type.IsArray && !type.IsSZArray();
public static bool IsGenericMethodParameter(this Type type) => type.IsGenericParameter && type.DeclaringMethod != null;
// Signature Types do not exist on NetStandard 2.0 but it's possible we could reach this if a NetCore app uses the NetStandard build of this library.
public static Type MakeSignatureGenericType(this Type genericTypeDefinition, Type[] typeArguments) => throw new NotSupportedException(SR.NotSupported_MakeGenericType_SignatureTypes);
}
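// Hedged usage sketch (hypothetical 'someType' variable): with these extensions in scope,
// netstandard code can write
//   bool isSz = someType.IsSZArray();              // single-dimension, zero lower bound
//   bool isMd = someType.IsVariableBoundArray();   // multi-dimensional or non-zero bound
// mirroring the instance properties that newer runtimes expose directly on System.Type.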
/// <summary>
/// Another layer of base types. For NetCore, these base types are all but empty. For NetStandard, these base types add the NetCore apis to NetStandard
/// so code interacting with "RoTypes" and friends can happily code to the full NetCore surface area.
///
/// On NetStandard (and pre-2.2 NetCore), the TypeInfo constructor is not exposed so we cannot derive directly from TypeInfo.
/// But we *can* derive from TypeDelegator which derives from TypeInfo. Since we're overriding (almost) every method,
/// none of TypeDelegator's own methods get called (and the instance field it has for holding the "underlying Type" goes
/// to waste.)
///
/// For future platforms, RoTypeBase's base type should be changed back to TypeInfo. Deriving from TypeDelegator is a hack and
/// causes us to waste an extra pointer-sized field per Type instance. It is also fragile as TypeDelegator could break us in the future
/// by overriding more methods.
/// </summary>
internal abstract class LeveledTypeInfo : TypeDelegator
{
protected LeveledTypeInfo() : base() { }
// This is an api that TypeDelegator overrides that it needn't have. Since RoType expects to fall through to System.Type's method, we have to reimplement
// System.Type's behavior here to avoid getting TypeDelegator's method.
//
// This is an annoying and fragile requirement as we have to do this for any api that (1) RoType declines to override and (2) TypeDelegator does override.
// This could be policed by an analyzer that searches RoType's method bodies for non-virtual calls to apis declared on TypeDelegator.
public override EventInfo[] GetEvents() => GetEvents(BindingFlags.Instance | BindingFlags.Static | BindingFlags.Public);
public abstract bool IsGenericTypeParameter { get; }
public abstract bool IsGenericMethodParameter { get; }
public abstract bool IsSZArray { get; }
public abstract bool IsVariableBoundArray { get; }
public abstract bool IsTypeDefinition { get; }
public abstract bool IsByRefLike { get; }
public virtual bool IsSignatureType => false;
protected abstract MethodInfo GetMethodImpl(string name, int genericParameterCount, BindingFlags bindingAttr, Binder binder, CallingConventions callConvention, Type[] types, ParameterModifier[] modifiers);
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledAssembly : Assembly
{
public abstract Type[] GetForwardedTypes();
}
internal abstract class LeveledConstructorInfo : ConstructorInfo
{
public abstract bool IsConstructedGenericMethod { get; }
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledMethodInfo : MethodInfo
{
public abstract bool IsConstructedGenericMethod { get; }
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledEventInfo : EventInfo
{
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledFieldInfo : FieldInfo
{
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledPropertyInfo : PropertyInfo
{
public abstract bool HasSameMetadataDefinitionAs(MemberInfo other);
}
internal abstract class LeveledCustomAttributeData : CustomAttributeData
{
// On NetStandard, AttributeType is declared non-virtually so apps are stuck calling the slow version that builds a constructor.
public new abstract Type AttributeType { get; }
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Linq.Expressions/src/System/Dynamic/Utils/ExpressionVisitorUtils.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Linq.Expressions;
namespace System.Dynamic.Utils
{
internal static class ExpressionVisitorUtils
{
public static Expression[]? VisitBlockExpressions(ExpressionVisitor visitor, BlockExpression block)
{
Expression[]? newNodes = null;
for (int i = 0, n = block.ExpressionCount; i < n; i++)
{
Expression curNode = block.GetExpression(i);
Expression node = visitor.Visit(curNode);
if (newNodes != null)
{
newNodes[i] = node;
}
else if (!object.ReferenceEquals(node, curNode))
{
newNodes = new Expression[n];
for (int j = 0; j < i; j++)
{
newNodes[j] = block.GetExpression(j);
}
newNodes[i] = node;
}
}
return newNodes;
}
public static ParameterExpression[]? VisitParameters(ExpressionVisitor visitor, IParameterProvider nodes, string? callerName)
{
ParameterExpression[]? newNodes = null;
for (int i = 0, n = nodes.ParameterCount; i < n; i++)
{
ParameterExpression curNode = nodes.GetParameter(i);
ParameterExpression node = visitor.VisitAndConvert(curNode, callerName);
if (newNodes != null)
{
newNodes[i] = node;
}
else if (!object.ReferenceEquals(node, curNode))
{
newNodes = new ParameterExpression[n];
for (int j = 0; j < i; j++)
{
newNodes[j] = nodes.GetParameter(j);
}
newNodes[i] = node;
}
}
return newNodes;
}
public static Expression[]? VisitArguments(ExpressionVisitor visitor, IArgumentProvider nodes)
{
Expression[]? newNodes = null;
for (int i = 0, n = nodes.ArgumentCount; i < n; i++)
{
Expression curNode = nodes.GetArgument(i);
Expression node = visitor.Visit(curNode);
if (newNodes != null)
{
newNodes[i] = node;
}
else if (!object.ReferenceEquals(node, curNode))
{
newNodes = new Expression[n];
for (int j = 0; j < i; j++)
{
newNodes[j] = nodes.GetArgument(j);
}
newNodes[i] = node;
}
}
return newNodes;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Linq.Expressions;
namespace System.Dynamic.Utils
{
internal static class ExpressionVisitorUtils
{
public static Expression[]? VisitBlockExpressions(ExpressionVisitor visitor, BlockExpression block)
{
Expression[]? newNodes = null;
for (int i = 0, n = block.ExpressionCount; i < n; i++)
{
Expression curNode = block.GetExpression(i);
Expression node = visitor.Visit(curNode);
if (newNodes != null)
{
newNodes[i] = node;
}
else if (!object.ReferenceEquals(node, curNode))
{
newNodes = new Expression[n];
for (int j = 0; j < i; j++)
{
newNodes[j] = block.GetExpression(j);
}
newNodes[i] = node;
}
}
return newNodes;
}
public static ParameterExpression[]? VisitParameters(ExpressionVisitor visitor, IParameterProvider nodes, string? callerName)
{
ParameterExpression[]? newNodes = null;
for (int i = 0, n = nodes.ParameterCount; i < n; i++)
{
ParameterExpression curNode = nodes.GetParameter(i);
ParameterExpression node = visitor.VisitAndConvert(curNode, callerName);
if (newNodes != null)
{
newNodes[i] = node;
}
else if (!object.ReferenceEquals(node, curNode))
{
newNodes = new ParameterExpression[n];
for (int j = 0; j < i; j++)
{
newNodes[j] = nodes.GetParameter(j);
}
newNodes[i] = node;
}
}
return newNodes;
}
public static Expression[]? VisitArguments(ExpressionVisitor visitor, IArgumentProvider nodes)
{
Expression[]? newNodes = null;
for (int i = 0, n = nodes.ArgumentCount; i < n; i++)
{
Expression curNode = nodes.GetArgument(i);
Expression node = visitor.Visit(curNode);
if (newNodes != null)
{
newNodes[i] = node;
}
else if (!object.ReferenceEquals(node, curNode))
{
newNodes = new Expression[n];
for (int j = 0; j < i; j++)
{
newNodes[j] = nodes.GetArgument(j);
}
newNodes[i] = node;
}
}
return newNodes;
}
}
}
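// Illustrative sketch, not part of the original file: a generic restatement of the lazy-copy
// pattern the three helpers above share. A fresh array is allocated only once some visited
// element differs from the original, and a null return tells the caller that nothing changed.
// The class, method and parameter names here are hypothetical and exist only for illustration.
internal static class CopyOnWriteSketch
{
    internal static T[]? MapLazily<T>(T[] items, System.Func<T, T> map) where T : class
    {
        T[]? newItems = null;
        for (int i = 0; i < items.Length; i++)
        {
            T mapped = map(items[i]);
            if (newItems != null)
            {
                newItems[i] = mapped;
            }
            else if (!ReferenceEquals(mapped, items[i]))
            {
                // First difference found: copy the unchanged prefix, then keep filling.
                newItems = new T[items.Length];
                for (int j = 0; j < i; j++)
                {
                    newItems[j] = items[j];
                }
                newItems[i] = mapped;
            }
        }
        return newItems; // null means "keep the existing array"
    }
}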
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/CodeGenBringUpTests/rem1.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
class child
{
static int Main()
{
const int Pass = 100;
const int Fail = -1;
int result = rem1(12, 5);
if (result == 2)
return Pass;
else
return Fail;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int rem1(int a, int b)
{
return a % b;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
class child
{
static int Main()
{
const int Pass = 100;
const int Fail = -1;
int result = rem1(12, 5);
if (result == 2)
return Pass;
else
return Fail;
}
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static int rem1(int a, int b)
{
return a % b;
}
}
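// Illustrative note, not part of the original test: rem1 above pins down 12 % 5 == 2. When
// reading remainder codegen tests it also helps to remember that C#'s % keeps the sign of
// the dividend; the hypothetical helper below just records that fact.
class RemainderSignSketch
{
    public static bool Check()
    {
        return (-12 % 5 == -2) && (12 % -5 == 2);
    }
}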
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AddHighNarrowingUpper.Vector128.Int16.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void AddHighNarrowingUpper_Vector128_Int16()
{
var test = new SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int16[] inArray1, Int32[] inArray2, Int32[] inArray3, Int16[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
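            // Rounds the pinned buffer address up to the requested power-of-two alignment
            // using the standard align-up formula: (p + a - 1) & ~(a - 1).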
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int16> _fld1;
public Vector128<Int32> _fld2;
public Vector128<Int32> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16 testClass)
{
var result = AdvSimd.AddHighNarrowingUpper(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16 testClass)
{
fixed (Vector64<Int16>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Int32[] _data3 = new Int32[Op3ElementCount];
private static Vector64<Int16> _clsVar1;
private static Vector128<Int32> _clsVar2;
private static Vector128<Int32> _clsVar3;
private Vector64<Int16> _fld1;
private Vector128<Int32> _fld2;
private Vector128<Int32> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int16[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.AddHighNarrowingUpper(
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingUpper), new Type[] { typeof(Vector64<Int16>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingUpper), new Type[] { typeof(Vector64<Int16>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.AddHighNarrowingUpper(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Int16>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int32>* pClsVar2 = &_clsVar2)
fixed (Vector128<Int32>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pClsVar1)),
AdvSimd.LoadVector128((Int32*)(pClsVar2)),
AdvSimd.LoadVector128((Int32*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr);
var result = AdvSimd.AddHighNarrowingUpper(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr));
var result = AdvSimd.AddHighNarrowingUpper(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16();
var result = AdvSimd.AddHighNarrowingUpper(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16();
fixed (Vector64<Int16>* pFld1 = &test._fld1)
fixed (Vector128<Int32>* pFld2 = &test._fld2)
fixed (Vector128<Int32>* pFld3 = &test._fld3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.AddHighNarrowingUpper(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Int16>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.AddHighNarrowingUpper(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(&test._fld1)),
AdvSimd.LoadVector128((Int32*)(&test._fld2)),
AdvSimd.LoadVector128((Int32*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Int16> op1, Vector128<Int32> op2, Vector128<Int32> op3, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int16[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.AddHighNarrowingUpper(firstOp, secondOp, thirdOp, i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddHighNarrowingUpper)}<Int16>(Vector64<Int16>, Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void AddHighNarrowingUpper_Vector128_Int16()
{
var test = new SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int16[] inArray1, Int32[] inArray2, Int32[] inArray3, Int16[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int16> _fld1;
public Vector128<Int32> _fld2;
public Vector128<Int32> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16 testClass)
{
var result = AdvSimd.AddHighNarrowingUpper(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16 testClass)
{
fixed (Vector64<Int16>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Int32[] _data3 = new Int32[Op3ElementCount];
private static Vector64<Int16> _clsVar1;
private static Vector128<Int32> _clsVar2;
private static Vector128<Int32> _clsVar3;
private Vector64<Int16> _fld1;
private Vector128<Int32> _fld2;
private Vector128<Int32> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int16[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.AddHighNarrowingUpper(
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingUpper), new Type[] { typeof(Vector64<Int16>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddHighNarrowingUpper), new Type[] { typeof(Vector64<Int16>), typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int16>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.AddHighNarrowingUpper(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Int16>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int32>* pClsVar2 = &_clsVar2)
fixed (Vector128<Int32>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pClsVar1)),
AdvSimd.LoadVector128((Int32*)(pClsVar2)),
AdvSimd.LoadVector128((Int32*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr);
var result = AdvSimd.AddHighNarrowingUpper(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr));
var result = AdvSimd.AddHighNarrowingUpper(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16();
var result = AdvSimd.AddHighNarrowingUpper(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__AddHighNarrowingUpper_Vector128_Int16();
fixed (Vector64<Int16>* pFld1 = &test._fld1)
fixed (Vector128<Int32>* pFld2 = &test._fld2)
fixed (Vector128<Int32>* pFld3 = &test._fld3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.AddHighNarrowingUpper(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Int16>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
fixed (Vector128<Int32>* pFld3 = &_fld3)
{
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2)),
AdvSimd.LoadVector128((Int32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.AddHighNarrowingUpper(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.AddHighNarrowingUpper(
AdvSimd.LoadVector64((Int16*)(&test._fld1)),
AdvSimd.LoadVector128((Int32*)(&test._fld2)),
AdvSimd.LoadVector128((Int32*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Int16> op1, Vector128<Int32> op2, Vector128<Int32> op3, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int16[] inArray1 = new Int16[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] inArray3 = new Int32[Op3ElementCount];
Int16[] outArray = new Int16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int16>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int16[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.AddHighNarrowingUpper(firstOp, secondOp, thirdOp, i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddHighNarrowingUpper)}<Int16>(Vector64<Int16>, Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
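// Illustrative sketch, not part of the generated test: a plausible scalar model of the lanes
// ValidateResult checks above, assuming the usual ADDHN2 semantics. Result lanes 0..3 pass the
// Vector64<Int16> operand through unchanged; lanes 4..7 take the high 16 bits of the wrapping
// 32-bit sum of the corresponding lanes of the two Vector128<Int32> operands. The class and
// method names are assumptions; the authoritative reference is the Helpers class the test uses.
internal static class AddHighNarrowingUpperScalarSketch
{
    internal static short Lane(short[] lower, int[] left, int[] right, int i) =>
        i < lower.Length
            ? lower[i]
            : unchecked((short)((left[i - lower.Length] + right[i - lower.Length]) >> 16));
}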
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Private.Xml/tests/XmlDocument/XmlNodeTests/LastChildTests.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Xml.Tests
{
public class LastChildTests
{
[Fact]
public static void ElementWithNoChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<top />");
Assert.Null(xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ElementWithNoChildTwoAttributes()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<top attr1='test1' attr2='test2' />");
Assert.Null(xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void DeleteOnlyChildInsertNewNode()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var old = node.FirstChild;
node.RemoveChild(old);
var newNode = xmlDocument.CreateTextNode("textNode");
node.AppendChild(newNode);
Assert.Equal("textNode", node.LastChild.Value);
Assert.Equal(XmlNodeType.Text, node.LastChild.NodeType);
Assert.Equal(1, node.ChildNodes.Count);
}
[Fact]
public static void DeleteOnlyChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var oldNode = node.FirstChild;
node.RemoveChild(oldNode);
Assert.Null(node.LastChild);
Assert.Equal(0, node.ChildNodes.Count);
}
[Fact]
public static void DeleteOnlyChildAddTwoChildren()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var oldNode = node.FirstChild;
node.RemoveChild(oldNode);
var element1 = xmlDocument.CreateElement("elem1");
var element2 = xmlDocument.CreateElement("elem2");
node.AppendChild(element1);
node.AppendChild(element2);
Assert.Equal(2, node.ChildNodes.Count);
Assert.Equal(element2, node.LastChild);
}
[Fact]
public static void DeleteOnlyChildAddTwoChildrenDeleteBoth()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var oldNode = node.FirstChild;
node.RemoveChild(oldNode);
var element1 = xmlDocument.CreateElement("elem1");
var element2 = xmlDocument.CreateElement("elem2");
node.AppendChild(element1);
node.AppendChild(element2);
node.RemoveChild(element1);
node.RemoveChild(element2);
Assert.Null(node.LastChild);
Assert.Equal(0, node.ChildNodes.Count);
}
[Fact]
public static void AttributeWithOnlyText()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<element attrib='helloworld' />");
var node = xmlDocument.DocumentElement.GetAttributeNode("attrib");
Assert.Equal("helloworld", node.LastChild.Value);
Assert.Equal(1, node.ChildNodes.Count);
}
[Fact]
public static void ElementWithTwoAttributes()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml(" <element attrib1='hello' attrib2='world' />");
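            // Attributes are not part of an element's ChildNodes, so even with two attributes
            // the element still has no last child.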
Assert.Null(xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ElementWithOneChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/></root>");
Assert.Equal(XmlNodeType.Element, xmlDocument.DocumentElement.LastChild.NodeType);
Assert.Equal("child1", xmlDocument.DocumentElement.LastChild.Name);
}
[Fact]
public static void ElementWithMoreThanOneChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/><child2>Some Text</child2><!-- comment --><?PI pi comments?></root>");
Assert.Equal(4, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.NotNull(xmlDocument.DocumentElement.LastChild);
Assert.Equal(XmlNodeType.ProcessingInstruction, xmlDocument.DocumentElement.LastChild.NodeType);
}
[Fact]
public static void ElementNodeWithOneChildAndOneElement()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<element attrib1='value'>content</element>");
Assert.Equal(XmlNodeType.Text, xmlDocument.DocumentElement.LastChild.NodeType);
Assert.Equal("content", xmlDocument.DocumentElement.LastChild.Value);
}
[Fact]
public static void NewlyCreatedElement()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateElement("element");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedAttribute()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateAttribute("attribute");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedTextNode()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateTextNode("textnode");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedCDataNode()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateCDataSection("cdata section");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedProcessingInstruction()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateProcessingInstruction("PI", "data");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedComment()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateComment("comment");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedDocumentFragment()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateDocumentFragment();
Assert.Null(node.LastChild);
}
[Fact]
public static void InsertChildAtLengthMinus1()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/><child2/><child3/></root>");
var child3 = xmlDocument.DocumentElement.LastChild;
Assert.Equal(3, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal("child3", child3.Name);
var newNode = xmlDocument.CreateElement("elem1");
xmlDocument.DocumentElement.InsertBefore(newNode, child3);
Assert.Equal(4, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(child3, xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void InsertChildToElementWithNoNode()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root/>");
Assert.False(xmlDocument.DocumentElement.HasChildNodes);
var newNode = xmlDocument.CreateElement("elem1");
xmlDocument.DocumentElement.AppendChild(newNode);
Assert.Equal(1, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(newNode, xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ReplaceOnlyChildOfNode()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child/></root>");
var oldNode = xmlDocument.DocumentElement.LastChild;
var newNode = xmlDocument.CreateElement("elem1");
Assert.Equal(1, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(oldNode, xmlDocument.DocumentElement.LastChild);
xmlDocument.DocumentElement.ReplaceChild(newNode, oldNode);
Assert.Equal(1, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(newNode, xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ReplaceChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/><child2/><child3/></root>");
var oldNode = xmlDocument.DocumentElement.LastChild;
var newNode = xmlDocument.CreateElement("elem1");
Assert.Equal(3, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(oldNode, xmlDocument.DocumentElement.LastChild);
xmlDocument.DocumentElement.ReplaceChild(newNode, oldNode);
Assert.Equal(3, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(newNode, xmlDocument.DocumentElement.LastChild);
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Xunit;
namespace System.Xml.Tests
{
public class LastChildTests
{
[Fact]
public static void ElementWithNoChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<top />");
Assert.Null(xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ElementWithNoChildTwoAttributes()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<top attr1='test1' attr2='test2' />");
Assert.Null(xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void DeleteOnlyChildInsertNewNode()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var old = node.FirstChild;
node.RemoveChild(old);
var newNode = xmlDocument.CreateTextNode("textNode");
node.AppendChild(newNode);
Assert.Equal("textNode", node.LastChild.Value);
Assert.Equal(XmlNodeType.Text, node.LastChild.NodeType);
Assert.Equal(1, node.ChildNodes.Count);
}
[Fact]
public static void DeleteOnlyChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var oldNode = node.FirstChild;
node.RemoveChild(oldNode);
Assert.Null(node.LastChild);
Assert.Equal(0, node.ChildNodes.Count);
}
[Fact]
public static void DeleteOnlyChildAddTwoChildren()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var oldNode = node.FirstChild;
node.RemoveChild(oldNode);
var element1 = xmlDocument.CreateElement("elem1");
var element2 = xmlDocument.CreateElement("elem2");
node.AppendChild(element1);
node.AppendChild(element2);
Assert.Equal(2, node.ChildNodes.Count);
Assert.Equal(element2, node.LastChild);
}
[Fact]
public static void DeleteOnlyChildAddTwoChildrenDeleteBoth()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<elem att1='foo'><a /></elem>");
var node = xmlDocument.DocumentElement;
var oldNode = node.FirstChild;
node.RemoveChild(oldNode);
var element1 = xmlDocument.CreateElement("elem1");
var element2 = xmlDocument.CreateElement("elem2");
node.AppendChild(element1);
node.AppendChild(element2);
node.RemoveChild(element1);
node.RemoveChild(element2);
Assert.Null(node.LastChild);
Assert.Equal(0, node.ChildNodes.Count);
}
[Fact]
public static void AttributeWithOnlyText()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<element attrib='helloworld' />");
var node = xmlDocument.DocumentElement.GetAttributeNode("attrib");
Assert.Equal("helloworld", node.LastChild.Value);
Assert.Equal(1, node.ChildNodes.Count);
}
[Fact]
public static void ElementWithTwoAttributes()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml(" <element attrib1='hello' attrib2='world' />");
Assert.Null(xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ElementWithOneChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/></root>");
Assert.Equal(XmlNodeType.Element, xmlDocument.DocumentElement.LastChild.NodeType);
Assert.Equal("child1", xmlDocument.DocumentElement.LastChild.Name);
}
[Fact]
public static void ElementWithMoreThanOneChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/><child2>Some Text</child2><!-- comment --><?PI pi comments?></root>");
Assert.Equal(4, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.NotNull(xmlDocument.DocumentElement.LastChild);
Assert.Equal(XmlNodeType.ProcessingInstruction, xmlDocument.DocumentElement.LastChild.NodeType);
}
[Fact]
public static void ElementNodeWithOneChildAndOneElement()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<element attrib1='value'>content</element>");
Assert.Equal(XmlNodeType.Text, xmlDocument.DocumentElement.LastChild.NodeType);
Assert.Equal("content", xmlDocument.DocumentElement.LastChild.Value);
}
[Fact]
public static void NewlyCreatedElement()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateElement("element");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedAttribute()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateAttribute("attribute");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedTextNode()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateTextNode("textnode");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedCDataNode()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateCDataSection("cdata section");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedProcessingInstruction()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateProcessingInstruction("PI", "data");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedComment()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateComment("comment");
Assert.Null(node.LastChild);
}
[Fact]
public static void NewlyCreatedDocumentFragment()
{
var xmlDocument = new XmlDocument();
var node = xmlDocument.CreateDocumentFragment();
Assert.Null(node.LastChild);
}
[Fact]
public static void InsertChildAtLengthMinus1()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/><child2/><child3/></root>");
var child3 = xmlDocument.DocumentElement.LastChild;
Assert.Equal(3, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal("child3", child3.Name);
var newNode = xmlDocument.CreateElement("elem1");
xmlDocument.DocumentElement.InsertBefore(newNode, child3);
Assert.Equal(4, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(child3, xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void InsertChildToElementWithNoNode()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root/>");
Assert.False(xmlDocument.DocumentElement.HasChildNodes);
var newNode = xmlDocument.CreateElement("elem1");
xmlDocument.DocumentElement.AppendChild(newNode);
Assert.Equal(1, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(newNode, xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ReplaceOnlyChildOfNode()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child/></root>");
var oldNode = xmlDocument.DocumentElement.LastChild;
var newNode = xmlDocument.CreateElement("elem1");
Assert.Equal(1, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(oldNode, xmlDocument.DocumentElement.LastChild);
xmlDocument.DocumentElement.ReplaceChild(newNode, oldNode);
Assert.Equal(1, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(newNode, xmlDocument.DocumentElement.LastChild);
}
[Fact]
public static void ReplaceChild()
{
var xmlDocument = new XmlDocument();
xmlDocument.LoadXml("<root><child1/><child2/><child3/></root>");
var oldNode = xmlDocument.DocumentElement.LastChild;
var newNode = xmlDocument.CreateElement("elem1");
Assert.Equal(3, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(oldNode, xmlDocument.DocumentElement.LastChild);
xmlDocument.DocumentElement.ReplaceChild(newNode, oldNode);
Assert.Equal(3, xmlDocument.DocumentElement.ChildNodes.Count);
Assert.Equal(newNode, xmlDocument.DocumentElement.LastChild);
}
}
}
| -1 |
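The LastChild tests above all reduce to one contract: XmlNode.LastChild is null for a node with no children and otherwise returns the final child in document order, tracking appends, removals, and replacements. A minimal standalone sketch of that contract, using only the System.Xml types already exercised above:

using System;
using System.Xml;

internal static class LastChildSketch
{
    private static void Main()
    {
        var doc = new XmlDocument();
        doc.LoadXml("<root><child1/><child2/></root>");

        // A node with children: LastChild is the final child in document order.
        XmlNode root = doc.DocumentElement;
        Console.WriteLine(root.LastChild.Name);      // prints "child2"

        // A newly created node has no children yet, so LastChild is null.
        XmlElement orphan = doc.CreateElement("elem");
        Console.WriteLine(orphan.LastChild is null); // prints "True"

        // Appending updates LastChild; removing the only child makes it null again.
        orphan.AppendChild(doc.CreateTextNode("text"));
        Console.WriteLine(orphan.LastChild.Value);   // prints "text"
        orphan.RemoveChild(orphan.LastChild);
        Console.WriteLine(orphan.LastChild is null); // prints "True"
    }
}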
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/CoreMangLib/system/runtime/interopservices/safehandle/safehandledangerousaddref.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security;
using System;
using System.Runtime.InteropServices; // For SafeHandle
[SecurityCritical]
public class MySafeValidHandle : SafeHandle
{
[SecurityCritical]
public MySafeValidHandle()
: base(IntPtr.Zero, true)
{
}
public override bool IsInvalid
{
[SecurityCritical]
get { return false; }
}
[SecurityCritical]
protected override bool ReleaseHandle()
{
return true;
}
}
[SecurityCritical]
public class MySafeInValidHandle : SafeHandle
{
[SecurityCritical]
public MySafeInValidHandle()
: base(IntPtr.Zero, true)
{
}
public override bool IsInvalid
{
[SecurityCritical]
get { return true; }
}
[SecurityCritical]
protected override bool ReleaseHandle()
{
return true;
}
}
/// <summary>
/// DangerousAddRef(System.Boolean@)
/// </summary>
public class SafeHandleDangerousAddRef
{
#region Public Methods
[SecuritySafeCritical]
public bool RunTests()
{
bool retVal = true;
TestLibrary.TestFramework.LogInformation("[Positive]");
retVal = PosTest1() && retVal;
retVal = PosTest2() && retVal;
retVal = PosTest3() && retVal;
retVal = PosTest4() && retVal;
return retVal;
}
#region Positive Test Cases
[SecuritySafeCritical]
public bool PosTest1()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest1: Call DangerousAddRef on a valid handle");
try
{
SafeHandle handle = new MySafeValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("001", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
[SecuritySafeCritical]
public bool PosTest2()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest2: Call DangerousAddRef on an invalid handle");
try
{
SafeHandle handle = new MySafeInValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("002", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
[SecuritySafeCritical]
public bool PosTest3()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest3: Call DangerousAddRef twice on a valid handle");
try
{
SafeHandle handle = new MySafeValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("003", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
[SecuritySafeCritical]
public bool PosTest4()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest4: Call DangerousAddRef twice on an invalid handle");
try
{
SafeHandle handle = new MySafeInValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("004", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
#endregion
#endregion
[SecuritySafeCritical]
public static int Main()
{
SafeHandleDangerousAddRef test = new SafeHandleDangerousAddRef();
TestLibrary.TestFramework.BeginTestCase("SafeHandleDangerousAddRef");
if (test.RunTests())
{
TestLibrary.TestFramework.EndTestCase();
TestLibrary.TestFramework.LogInformation("PASS");
return 100;
}
else
{
TestLibrary.TestFramework.EndTestCase();
TestLibrary.TestFramework.LogInformation("FAIL");
return 0;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security;
using System;
using System.Runtime.InteropServices; // For SafeHandle
[SecurityCritical]
public class MySafeValidHandle : SafeHandle
{
[SecurityCritical]
public MySafeValidHandle()
: base(IntPtr.Zero, true)
{
}
public override bool IsInvalid
{
[SecurityCritical]
get { return false; }
}
[SecurityCritical]
protected override bool ReleaseHandle()
{
return true;
}
}
[SecurityCritical]
public class MySafeInValidHandle : SafeHandle
{
[SecurityCritical]
public MySafeInValidHandle()
: base(IntPtr.Zero, true)
{
}
public override bool IsInvalid
{
[SecurityCritical]
get { return true; }
}
[SecurityCritical]
protected override bool ReleaseHandle()
{
return true;
}
}
/// <summary>
/// DangerousAddRef(System.Boolean@)
/// </summary>
public class SafeHandleDangerousAddRef
{
#region Public Methods
[SecuritySafeCritical]
public bool RunTests()
{
bool retVal = true;
TestLibrary.TestFramework.LogInformation("[Positive]");
retVal = PosTest1() && retVal;
retVal = PosTest2() && retVal;
retVal = PosTest3() && retVal;
retVal = PosTest4() && retVal;
return retVal;
}
#region Positive Test Cases
[SecuritySafeCritical]
public bool PosTest1()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest1: Call DangerousAddRef on a valid handle");
try
{
SafeHandle handle = new MySafeValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("001", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
[SecuritySafeCritical]
public bool PosTest2()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest2: Call DangerousAddRef on an invalid handle");
try
{
SafeHandle handle = new MySafeInValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("002", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
[SecuritySafeCritical]
public bool PosTest3()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest3: Call DangerousAddRef twice on a valid handle");
try
{
SafeHandle handle = new MySafeValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("003", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
[SecuritySafeCritical]
public bool PosTest4()
{
bool retVal = true;
TestLibrary.TestFramework.BeginScenario("PosTest4: Call DangerousAddRef twice on an invalid handle");
try
{
SafeHandle handle = new MySafeInValidHandle();
bool success = false;
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
handle.DangerousAddRef(ref success);
if (!success)
{
TestLibrary.TestFramework.LogInformation("success returns false after calling DangerousAddRef");
}
}
catch (Exception e)
{
TestLibrary.TestFramework.LogError("004", "Unexpected exception: " + e);
TestLibrary.TestFramework.LogInformation(e.StackTrace);
retVal = false;
}
return retVal;
}
#endregion
#endregion
[SecuritySafeCritical]
public static int Main()
{
SafeHandleDangerousAddRef test = new SafeHandleDangerousAddRef();
TestLibrary.TestFramework.BeginTestCase("SafeHandleDangerousAddRef");
if (test.RunTests())
{
TestLibrary.TestFramework.EndTestCase();
TestLibrary.TestFramework.LogInformation("PASS");
return 100;
}
else
{
TestLibrary.TestFramework.EndTestCase();
TestLibrary.TestFramework.LogInformation("FAIL");
return 0;
}
}
}
| -1 |
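The DangerousAddRef scenarios above never release the reference they take; in consuming code the usual pattern pairs DangerousAddRef with DangerousRelease in a try/finally around the use of the raw handle value. A minimal sketch of that pattern — the DummySafeHandle type below is a stand-in modeled on the test's MySafeValidHandle, not production code:

using System;
using System.Runtime.InteropServices;

internal sealed class DummySafeHandle : SafeHandle
{
    public DummySafeHandle() : base(IntPtr.Zero, ownsHandle: true)
    {
        SetHandle(new IntPtr(0x1234)); // fake non-zero value; nothing real is allocated
    }
    public override bool IsInvalid => handle == IntPtr.Zero;
    protected override bool ReleaseHandle() => true; // nothing real to free in this sketch
}

internal static class DangerousAddRefSketch
{
    private static void Main()
    {
        var safeHandle = new DummySafeHandle();
        bool addedRef = false;
        try
        {
            // Take a ref count so the handle cannot be released while the raw value is in use.
            safeHandle.DangerousAddRef(ref addedRef);
            IntPtr raw = safeHandle.DangerousGetHandle();
            Console.WriteLine($"using raw handle 0x{raw.ToInt64():x}");
        }
        finally
        {
            // Release exactly once, and only if the AddRef actually succeeded.
            if (addedRef)
            {
                safeHandle.DangerousRelease();
            }
        }
        safeHandle.Dispose();
    }
}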
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/Arm/Dp/DotProduct.Vector64.Int32.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void DotProduct_Vector64_Int32()
{
var test = new SimpleTernaryOpTest__DotProduct_Vector64_Int32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__DotProduct_Vector64_Int32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, SByte[] inArray2, SByte[] inArray3, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<SByte>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<SByte, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int32> _fld1;
public Vector64<SByte> _fld2;
public Vector64<SByte> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__DotProduct_Vector64_Int32 testClass)
{
var result = Dp.DotProduct(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__DotProduct_Vector64_Int32 testClass)
{
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector64<SByte>* pFld2 = &_fld2)
fixed (Vector64<SByte>* pFld3 = &_fld3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector64((SByte*)(pFld2)),
AdvSimd.LoadVector64((SByte*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static SByte[] _data2 = new SByte[Op2ElementCount];
private static SByte[] _data3 = new SByte[Op3ElementCount];
private static Vector64<Int32> _clsVar1;
private static Vector64<SByte> _clsVar2;
private static Vector64<SByte> _clsVar3;
private Vector64<Int32> _fld1;
private Vector64<SByte> _fld2;
private Vector64<SByte> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__DotProduct_Vector64_Int32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
}
public SimpleTernaryOpTest__DotProduct_Vector64_Int32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => Dp.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Dp.DotProduct(
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Dp).GetMethod(nameof(Dp.DotProduct), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<SByte>), typeof(Vector64<SByte>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(Dp).GetMethod(nameof(Dp.DotProduct), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<SByte>), typeof(Vector64<SByte>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Dp.DotProduct(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector64<SByte>* pClsVar2 = &_clsVar2)
fixed (Vector64<SByte>* pClsVar3 = &_clsVar3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pClsVar1)),
AdvSimd.LoadVector64((SByte*)(pClsVar2)),
AdvSimd.LoadVector64((SByte*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray3Ptr);
var result = Dp.DotProduct(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector64((SByte*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector64((SByte*)(_dataTable.inArray3Ptr));
var result = Dp.DotProduct(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__DotProduct_Vector64_Int32();
var result = Dp.DotProduct(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__DotProduct_Vector64_Int32();
fixed (Vector64<Int32>* pFld1 = &test._fld1)
fixed (Vector64<SByte>* pFld2 = &test._fld2)
fixed (Vector64<SByte>* pFld3 = &test._fld3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector64((SByte*)(pFld2)),
AdvSimd.LoadVector64((SByte*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Dp.DotProduct(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector64<SByte>* pFld2 = &_fld2)
fixed (Vector64<SByte>* pFld3 = &_fld3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector64((SByte*)(pFld2)),
AdvSimd.LoadVector64((SByte*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Dp.DotProduct(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(&test._fld1)),
AdvSimd.LoadVector64((SByte*)(&test._fld2)),
AdvSimd.LoadVector64((SByte*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Int32> op1, Vector64<SByte> op2, Vector64<SByte> op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] inArray3 = new SByte[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] inArray3 = new SByte[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int32[] firstOp, SByte[] secondOp, SByte[] thirdOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.DotProduct(firstOp[i], secondOp, 4 * i, thirdOp, 4 * i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Dp)}.{nameof(Dp.DotProduct)}<Int32>(Vector64<Int32>, Vector64<SByte>, Vector64<SByte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void DotProduct_Vector64_Int32()
{
var test = new SimpleTernaryOpTest__DotProduct_Vector64_Int32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__DotProduct_Vector64_Int32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, SByte[] inArray2, SByte[] inArray3, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<SByte>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<SByte, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int32> _fld1;
public Vector64<SByte> _fld2;
public Vector64<SByte> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__DotProduct_Vector64_Int32 testClass)
{
var result = Dp.DotProduct(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__DotProduct_Vector64_Int32 testClass)
{
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector64<SByte>* pFld2 = &_fld2)
fixed (Vector64<SByte>* pFld3 = &_fld3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector64((SByte*)(pFld2)),
AdvSimd.LoadVector64((SByte*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static SByte[] _data2 = new SByte[Op2ElementCount];
private static SByte[] _data3 = new SByte[Op3ElementCount];
private static Vector64<Int32> _clsVar1;
private static Vector64<SByte> _clsVar2;
private static Vector64<SByte> _clsVar3;
private Vector64<Int32> _fld1;
private Vector64<SByte> _fld2;
private Vector64<SByte> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__DotProduct_Vector64_Int32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
}
public SimpleTernaryOpTest__DotProduct_Vector64_Int32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => Dp.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Dp.DotProduct(
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Dp).GetMethod(nameof(Dp.DotProduct), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<SByte>), typeof(Vector64<SByte>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<SByte>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(Dp).GetMethod(nameof(Dp.DotProduct), new Type[] { typeof(Vector64<Int32>), typeof(Vector64<SByte>), typeof(Vector64<SByte>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((SByte*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Dp.DotProduct(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector64<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector64<SByte>* pClsVar2 = &_clsVar2)
fixed (Vector64<SByte>* pClsVar3 = &_clsVar3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pClsVar1)),
AdvSimd.LoadVector64((SByte*)(pClsVar2)),
AdvSimd.LoadVector64((SByte*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray3Ptr);
var result = Dp.DotProduct(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector64((SByte*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector64((SByte*)(_dataTable.inArray3Ptr));
var result = Dp.DotProduct(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__DotProduct_Vector64_Int32();
var result = Dp.DotProduct(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__DotProduct_Vector64_Int32();
fixed (Vector64<Int32>* pFld1 = &test._fld1)
fixed (Vector64<SByte>* pFld2 = &test._fld2)
fixed (Vector64<SByte>* pFld3 = &test._fld3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector64((SByte*)(pFld2)),
AdvSimd.LoadVector64((SByte*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Dp.DotProduct(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector64<Int32>* pFld1 = &_fld1)
fixed (Vector64<SByte>* pFld2 = &_fld2)
fixed (Vector64<SByte>* pFld3 = &_fld3)
{
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(pFld1)),
AdvSimd.LoadVector64((SByte*)(pFld2)),
AdvSimd.LoadVector64((SByte*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Dp.DotProduct(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = Dp.DotProduct(
AdvSimd.LoadVector64((Int32*)(&test._fld1)),
AdvSimd.LoadVector64((SByte*)(&test._fld2)),
AdvSimd.LoadVector64((SByte*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector64<Int32> op1, Vector64<SByte> op2, Vector64<SByte> op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] inArray3 = new SByte[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
SByte[] inArray2 = new SByte[Op2ElementCount];
SByte[] inArray3 = new SByte[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<SByte>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int32[] firstOp, SByte[] secondOp, SByte[] thirdOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.DotProduct(firstOp[i], secondOp, 4 * i, thirdOp, 4 * i) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Dp)}.{nameof(Dp.DotProduct)}<Int32>(Vector64<Int32>, Vector64<SByte>, Vector64<SByte>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
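The validation in the DotProduct test above compares each Int32 result lane against Helpers.DotProduct(firstOp[i], secondOp, 4 * i, thirdOp, 4 * i). A plain scalar sketch of that per-lane math, with no intrinsics; the widening multiply-accumulate semantics shown here are an assumption inferred from that validation call:

using System;

internal static class DotProductScalarSketch
{
    // Assumed per-lane semantics, inferred from the Helpers.DotProduct validation above:
    // result[i] = accumulator[i] + sum over k = 0..3 of left[4*i + k] * right[4*i + k],
    // with the sbyte operands widened to int before multiplying.
    private static int DotProductLane(int accumulator, sbyte[] left, sbyte[] right, int laneIndex)
    {
        int sum = 0;
        for (int k = 0; k < 4; k++)
        {
            sum += left[4 * laneIndex + k] * right[4 * laneIndex + k];
        }
        return accumulator + sum;
    }

    private static void Main()
    {
        int[] accumulator = { 10, -3 };                      // two Int32 lanes, as in Vector64<Int32>
        sbyte[] left  = { 1, 2, 3, 4, -1, -2, -3, -4 };      // eight SByte lanes, as in Vector64<SByte>
        sbyte[] right = { 5, 6, 7, 8,  5,  6,  7,  8 };
        for (int i = 0; i < accumulator.Length; i++)
        {
            Console.WriteLine(DotProductLane(accumulator[i], left, right, i));
        }
        // Lane 0: 10 + (5 + 12 + 21 + 32) = 80
        // Lane 1: -3 - (5 + 12 + 21 + 32) = -73
    }
}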
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/Directed/CheckedCtor/Generic_Test_CSharp_Base_2.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// This test represents a case where csc.exe puts a base/peer ctor callsite outside of the
// first block of the derived ctor.
//
// Specifically covers: "Use of ?? in a generic base ctor argument expression"
//
using System;
using System.Runtime.CompilerServices;
namespace Test
{
static class App
{
static int Main()
{
new DerivedClass<int>("NotNull");
new DerivedClass<int>(null);
return 100;
}
}
public class BaseClass
{
[MethodImpl(MethodImplOptions.NoInlining)]
public BaseClass(string arg) { Console.Write("BaseClass::.ctor -- `{0}'\r\n", arg); return; }
}
public class DerivedClass<T> : BaseClass
{
[MethodImpl(MethodImplOptions.NoInlining)]
public DerivedClass(string selector) : base(selector ?? "NeededToFallBack") { }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//
// This test represents a case where csc.exe puts a base/peer ctor callsite outside of the
// first block of the derived ctor.
//
// Specifically covers: "Use of ?? in a generic base ctor argument expression"
//
using System;
using System.Runtime.CompilerServices;
namespace Test
{
static class App
{
static int Main()
{
new DerivedClass<int>("NotNull");
new DerivedClass<int>(null);
return 100;
}
}
public class BaseClass
{
[MethodImpl(MethodImplOptions.NoInlining)]
public BaseClass(string arg) { Console.Write("BaseClass::.ctor -- `{0}'\r\n", arg); return; }
}
public class DerivedClass<T> : BaseClass
{
[MethodImpl(MethodImplOptions.NoInlining)]
public DerivedClass(string selector) : base(selector ?? "NeededToFallBack") { }
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Private.CoreLib/src/System/Globalization/CalendarData.Icu.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
namespace System.Globalization
{
// needs to be kept in sync with CalendarDataType in System.Globalization.Native
internal enum CalendarDataType
{
Uninitialized = 0,
NativeName = 1,
MonthDay = 2,
ShortDates = 3,
LongDates = 4,
YearMonths = 5,
DayNames = 6,
AbbrevDayNames = 7,
MonthNames = 8,
AbbrevMonthNames = 9,
SuperShortDayNames = 10,
MonthGenitiveNames = 11,
AbbrevMonthGenitiveNames = 12,
EraNames = 13,
AbbrevEraNames = 14,
}
internal sealed partial class CalendarData
{
private bool IcuLoadCalendarDataFromSystem(string localeName, CalendarId calendarId)
{
Debug.Assert(!GlobalizationMode.UseNls);
bool result = true;
// these can return null but are later replaced with String.Empty or other non-nullable value
result &= GetCalendarInfo(localeName, calendarId, CalendarDataType.NativeName, out this.sNativeName!);
result &= GetCalendarInfo(localeName, calendarId, CalendarDataType.MonthDay, out this.sMonthDay!);
if (this.sMonthDay != null)
{
this.sMonthDay = NormalizeDatePattern(this.sMonthDay);
}
result &= EnumDatePatterns(localeName, calendarId, CalendarDataType.ShortDates, out this.saShortDates!);
result &= EnumDatePatterns(localeName, calendarId, CalendarDataType.LongDates, out this.saLongDates!);
result &= EnumDatePatterns(localeName, calendarId, CalendarDataType.YearMonths, out this.saYearMonths!);
result &= EnumCalendarInfo(localeName, calendarId, CalendarDataType.DayNames, out this.saDayNames!);
result &= EnumCalendarInfo(localeName, calendarId, CalendarDataType.AbbrevDayNames, out this.saAbbrevDayNames!);
result &= EnumCalendarInfo(localeName, calendarId, CalendarDataType.SuperShortDayNames, out this.saSuperShortDayNames!);
string? leapHebrewMonthName = null;
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.MonthNames, out this.saMonthNames!, ref leapHebrewMonthName);
if (leapHebrewMonthName != null)
{
Debug.Assert(this.saMonthNames != null);
// In Hebrew calendar, get the leap month name Adar II and override the non-leap month 7
Debug.Assert(calendarId == CalendarId.HEBREW && saMonthNames.Length == 13);
saLeapYearMonthNames = (string[]) saMonthNames.Clone();
saLeapYearMonthNames[6] = leapHebrewMonthName;
// The returned data from ICU has 6th month name as 'Adar I' and 7th month name as 'Adar'
// We need to adjust that in the list used with non-leap year to have 6th month as 'Adar' and 7th month as 'Adar II'
// note that when formatting non-leap year dates, 7th month shouldn't get used at all.
saMonthNames[5] = saMonthNames[6];
saMonthNames[6] = leapHebrewMonthName;
}
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.AbbrevMonthNames, out this.saAbbrevMonthNames!, ref leapHebrewMonthName);
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.MonthGenitiveNames, out this.saMonthGenitiveNames!, ref leapHebrewMonthName);
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.AbbrevMonthGenitiveNames, out this.saAbbrevMonthGenitiveNames!, ref leapHebrewMonthName);
result &= EnumEraNames(localeName, calendarId, CalendarDataType.EraNames, out this.saEraNames!);
result &= EnumEraNames(localeName, calendarId, CalendarDataType.AbbrevEraNames, out this.saAbbrevEraNames!);
return result;
}
internal static int IcuGetTwoDigitYearMax()
{
Debug.Assert(!GlobalizationMode.UseNls);
// There is no user override for this value on Linux or in ICU.
// So just return -1 to use the hard-coded defaults.
return -1;
}
// Call native side to figure out which calendars are allowed
internal static int IcuGetCalendars(string localeName, CalendarId[] calendars)
{
Debug.Assert(!GlobalizationMode.Invariant);
Debug.Assert(!GlobalizationMode.UseNls);
// NOTE: there are no 'user overrides' on Linux
int count = Interop.Globalization.GetCalendars(localeName, calendars, calendars.Length);
// ensure there is at least 1 calendar returned
if (count == 0 && calendars.Length > 0)
{
calendars[0] = CalendarId.GREGORIAN;
count = 1;
}
return count;
}
private static bool IcuSystemSupportsTaiwaneseCalendar()
{
Debug.Assert(!GlobalizationMode.UseNls);
return true;
}
// PAL Layer ends here
private static unsafe bool GetCalendarInfo(string localeName, CalendarId calendarId, CalendarDataType dataType, out string? calendarString)
{
Debug.Assert(!GlobalizationMode.Invariant);
return Interop.CallStringMethod(
static (buffer, locale, id, type) =>
{
fixed (char* bufferPtr = buffer)
{
return Interop.Globalization.GetCalendarInfo(locale, id, type, bufferPtr, buffer.Length);
}
},
localeName,
calendarId,
dataType,
out calendarString);
}
private static bool EnumDatePatterns(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? datePatterns)
{
datePatterns = null;
IcuEnumCalendarsData callbackContext = default;
callbackContext.Results = new List<string>();
callbackContext.DisallowDuplicates = true;
bool result = EnumCalendarInfo(localeName, calendarId, dataType, ref callbackContext);
if (result)
{
List<string> datePatternsList = callbackContext.Results;
for (int i = 0; i < datePatternsList.Count; i++)
{
datePatternsList[i] = NormalizeDatePattern(datePatternsList[i]);
}
if (dataType == CalendarDataType.ShortDates)
FixDefaultShortDatePattern(datePatternsList);
datePatterns = datePatternsList.ToArray();
}
return result;
}
        // FixDefaultShortDatePattern will convert the default short date pattern from using 'yy' to using 'yyyy'
        // and will ensure the original pattern still exists in the list.
        // Doing that makes the short date pattern format the year as a 4-digit number instead of a 2-digit number.
        // Example: June 5, 2018 will be formatted to something like 6/5/2018 instead of 6/5/18 for the en-US culture.
private static void FixDefaultShortDatePattern(List<string> shortDatePatterns)
{
if (shortDatePatterns.Count == 0)
return;
string s = shortDatePatterns[0];
            // We do not expect any pattern to have a length of more than 100.
            // We have to do this check to prevent a stack overflow, as we allocate the buffer on the stack.
if (s.Length > 100)
return;
Span<char> modifiedPattern = stackalloc char[s.Length + 2];
int index = 0;
while (index < s.Length)
{
if (s[index] == '\'')
{
do
{
modifiedPattern[index] = s[index];
index++;
} while (index < s.Length && s[index] != '\'');
if (index >= s.Length)
return;
}
else if (s[index] == 'y')
{
modifiedPattern[index] = 'y';
break;
}
modifiedPattern[index] = s[index];
index++;
}
if (index >= s.Length - 1 || s[index + 1] != 'y')
{
// not a 'yy' pattern
return;
}
if (index + 2 < s.Length && s[index + 2] == 'y')
{
// we have 'yyy' then nothing to do
return;
}
            // We are now sure that we have a 'yy' pattern
Debug.Assert(index + 3 < modifiedPattern.Length);
modifiedPattern[index + 1] = 'y'; // second y
modifiedPattern[index + 2] = 'y'; // third y
modifiedPattern[index + 3] = 'y'; // fourth y
index += 2;
// Now, copy the rest of the pattern to the destination buffer
while (index < s.Length)
{
modifiedPattern[index + 2] = s[index];
index++;
}
shortDatePatterns[0] = modifiedPattern.ToString();
for (int i = 1; i < shortDatePatterns.Count; i++)
{
if (shortDatePatterns[i] == shortDatePatterns[0])
{
                    // Found a match in the list to the newly constructed pattern, so replace it with the original, unmodified pattern
shortDatePatterns[i] = s;
return;
}
}
            // If we get here, the newly constructed pattern was not found in the list, so add the original pattern
shortDatePatterns.Add(s);
}
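        // Illustrative worked example, not part of the original source (the input values are hypothetical):
        //
        //     var patterns = new List<string> { "M/d/yy", "M/d/y" };
        //     FixDefaultShortDatePattern(patterns);
        //     // patterns is now { "M/d/yyyy", "M/d/y", "M/d/yy" }: the default entry formats a
        //     // four-digit year and the original two-digit-year pattern is kept at the end.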
/// <summary>
/// The ICU date format characters are not exactly the same as the .NET date format characters.
/// NormalizeDatePattern will take in an ICU date pattern and return the equivalent .NET date pattern.
/// </summary>
/// <remarks>
/// see Date Field Symbol Table in http://userguide.icu-project.org/formatparse/datetime
/// and https://msdn.microsoft.com/en-us/library/8kb3ddd4(v=vs.110).aspx
/// </remarks>
private static string NormalizeDatePattern(string input)
{
var destination = input.Length < 128 ?
new ValueStringBuilder(stackalloc char[128]) :
new ValueStringBuilder(input.Length);
int index = 0;
while (index < input.Length)
{
switch (input[index])
{
case '\'':
// single quotes escape characters, like 'de' in es-SP
// so read verbatim until the next single quote
destination.Append(input[index++]);
while (index < input.Length)
{
char current = input[index++];
destination.Append(current);
if (current == '\'')
{
break;
}
}
break;
case 'E':
case 'e':
case 'c':
// 'E' in ICU is the day of the week, which maps to 3 or 4 'd's in .NET
// 'e' in ICU is the local day of the week, which has no representation in .NET, but
// maps closest to 3 or 4 'd's in .NET
// 'c' in ICU is the stand-alone day of the week, which has no representation in .NET, but
// maps closest to 3 or 4 'd's in .NET
NormalizeDayOfWeek(input, ref destination, ref index);
break;
case 'L':
case 'M':
// 'L' in ICU is the stand-alone name of the month,
// which maps closest to 'M' in .NET since it doesn't support stand-alone month names in patterns
// 'M' in both ICU and .NET is the month,
// but ICU supports 5 'M's, which is the super short month name
int occurrences = CountOccurrences(input, input[index], ref index);
if (occurrences > 4)
{
// 5 'L's or 'M's in ICU is the super short name, which maps closest to MMM in .NET
occurrences = 3;
}
destination.Append('M', occurrences);
break;
case 'G':
// 'G' in ICU is the era, which maps to 'g' in .NET
CountOccurrences(input, 'G', ref index);
// it doesn't matter how many 'G's, since .NET only supports 'g' or 'gg', and they
// have the same meaning
destination.Append('g');
break;
case 'y':
// a single 'y' in ICU is the year with no padding or trimming.
// a single 'y' in .NET is the year with 1 or 2 digits
// so convert any single 'y' to 'yyyy'
occurrences = CountOccurrences(input, 'y', ref index);
if (occurrences == 1)
{
occurrences = 4;
}
destination.Append('y', occurrences);
break;
default:
const string unsupportedDateFieldSymbols = "YuUrQqwWDFg";
Debug.Assert(!unsupportedDateFieldSymbols.Contains(input[index]),
$"Encountered an unexpected date field symbol '{input[index]}' from ICU which has no known corresponding .NET equivalent.");
destination.Append(input[index++]);
break;
}
}
return destination.ToString();
}
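        // Illustrative conversions, not part of the original source (the patterns are hypothetical examples):
        //
        //     NormalizeDatePattern("EEEE, d MMMM y"); // returns "dddd, d MMMM yyyy"
        //     NormalizeDatePattern("MMMMM/yy"); // returns "MMM/yy" (5 'M's map to the abbreviated month)
        //     NormalizeDatePattern("G y"); // returns "g yyyy"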
private static void NormalizeDayOfWeek(string input, ref ValueStringBuilder destination, ref int index)
{
char dayChar = input[index];
int occurrences = CountOccurrences(input, dayChar, ref index);
occurrences = Math.Max(occurrences, 3);
if (occurrences > 4)
{
                // 5 and 6 E/e/c characters in ICU are the super short names, which map closest to ddd in .NET
occurrences = 3;
}
destination.Append('d', occurrences);
}
private static int CountOccurrences(string input, char value, ref int index)
{
int startIndex = index;
while (index < input.Length && input[index] == value)
{
index++;
}
return index - startIndex;
}
private static bool EnumMonthNames(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? monthNames, ref string? leapHebrewMonthName)
{
monthNames = null;
IcuEnumCalendarsData callbackContext = default;
callbackContext.Results = new List<string>();
bool result = EnumCalendarInfo(localeName, calendarId, dataType, ref callbackContext);
if (result)
{
// the month-name arrays are expected to have 13 elements. If ICU only returns 12, add an
// extra empty string to fill the array.
if (callbackContext.Results.Count == 12)
{
callbackContext.Results.Add(string.Empty);
}
if (callbackContext.Results.Count > 13)
{
Debug.Assert(calendarId == CalendarId.HEBREW && callbackContext.Results.Count == 14);
if (calendarId == CalendarId.HEBREW)
{
leapHebrewMonthName = callbackContext.Results[13];
}
callbackContext.Results.RemoveRange(13, callbackContext.Results.Count - 13);
}
monthNames = callbackContext.Results.ToArray();
}
return result;
}
private static bool EnumEraNames(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? eraNames)
{
bool result = EnumCalendarInfo(localeName, calendarId, dataType, out eraNames);
// .NET expects that only the Japanese calendars have more than 1 era.
// So for other calendars, only return the latest era.
if (calendarId != CalendarId.JAPAN && calendarId != CalendarId.JAPANESELUNISOLAR && eraNames?.Length > 0)
{
string[] latestEraName = new string[] { eraNames![eraNames.Length - 1] };
eraNames = latestEraName;
}
return result;
}
internal static bool EnumCalendarInfo(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? calendarData)
{
calendarData = null;
IcuEnumCalendarsData callbackContext = default;
callbackContext.Results = new List<string>();
bool result = EnumCalendarInfo(localeName, calendarId, dataType, ref callbackContext);
if (result)
{
calendarData = callbackContext.Results.ToArray();
}
return result;
}
private static unsafe bool EnumCalendarInfo(string localeName, CalendarId calendarId, CalendarDataType dataType, ref IcuEnumCalendarsData callbackContext)
{
return Interop.Globalization.EnumCalendarInfo(&EnumCalendarInfoCallback, localeName, calendarId, dataType, (IntPtr)Unsafe.AsPointer(ref callbackContext));
}
[UnmanagedCallersOnly]
private static unsafe void EnumCalendarInfoCallback(char* calendarStringPtr, IntPtr context)
{
try
{
ReadOnlySpan<char> calendarStringSpan = MemoryMarshal.CreateReadOnlySpanFromNullTerminated(calendarStringPtr);
ref IcuEnumCalendarsData callbackContext = ref Unsafe.As<byte, IcuEnumCalendarsData>(ref *(byte*)context);
if (callbackContext.DisallowDuplicates)
{
foreach (string existingResult in callbackContext.Results)
{
if (string.CompareOrdinal(calendarStringSpan, existingResult) == 0)
{
// the value is already in the results, so don't add it again
return;
}
}
}
callbackContext.Results.Add(calendarStringSpan.ToString());
}
catch (Exception e)
{
Debug.Fail(e.ToString());
// we ignore the managed exceptions here because EnumCalendarInfoCallback will get called from the native code.
// If we don't ignore the exception here that can cause the runtime to fail fast.
}
}
private struct IcuEnumCalendarsData
{
public List<string> Results;
public bool DisallowDuplicates;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Text;
namespace System.Globalization
{
// needs to be kept in sync with CalendarDataType in System.Globalization.Native
internal enum CalendarDataType
{
Uninitialized = 0,
NativeName = 1,
MonthDay = 2,
ShortDates = 3,
LongDates = 4,
YearMonths = 5,
DayNames = 6,
AbbrevDayNames = 7,
MonthNames = 8,
AbbrevMonthNames = 9,
SuperShortDayNames = 10,
MonthGenitiveNames = 11,
AbbrevMonthGenitiveNames = 12,
EraNames = 13,
AbbrevEraNames = 14,
}
internal sealed partial class CalendarData
{
private bool IcuLoadCalendarDataFromSystem(string localeName, CalendarId calendarId)
{
Debug.Assert(!GlobalizationMode.UseNls);
bool result = true;
// these can return null but are later replaced with String.Empty or other non-nullable value
result &= GetCalendarInfo(localeName, calendarId, CalendarDataType.NativeName, out this.sNativeName!);
result &= GetCalendarInfo(localeName, calendarId, CalendarDataType.MonthDay, out this.sMonthDay!);
if (this.sMonthDay != null)
{
this.sMonthDay = NormalizeDatePattern(this.sMonthDay);
}
result &= EnumDatePatterns(localeName, calendarId, CalendarDataType.ShortDates, out this.saShortDates!);
result &= EnumDatePatterns(localeName, calendarId, CalendarDataType.LongDates, out this.saLongDates!);
result &= EnumDatePatterns(localeName, calendarId, CalendarDataType.YearMonths, out this.saYearMonths!);
result &= EnumCalendarInfo(localeName, calendarId, CalendarDataType.DayNames, out this.saDayNames!);
result &= EnumCalendarInfo(localeName, calendarId, CalendarDataType.AbbrevDayNames, out this.saAbbrevDayNames!);
result &= EnumCalendarInfo(localeName, calendarId, CalendarDataType.SuperShortDayNames, out this.saSuperShortDayNames!);
string? leapHebrewMonthName = null;
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.MonthNames, out this.saMonthNames!, ref leapHebrewMonthName);
if (leapHebrewMonthName != null)
{
Debug.Assert(this.saMonthNames != null);
// In Hebrew calendar, get the leap month name Adar II and override the non-leap month 7
Debug.Assert(calendarId == CalendarId.HEBREW && saMonthNames.Length == 13);
saLeapYearMonthNames = (string[]) saMonthNames.Clone();
saLeapYearMonthNames[6] = leapHebrewMonthName;
// The returned data from ICU has 6th month name as 'Adar I' and 7th month name as 'Adar'
// We need to adjust that in the list used with non-leap year to have 6th month as 'Adar' and 7th month as 'Adar II'
// note that when formatting non-leap year dates, 7th month shouldn't get used at all.
saMonthNames[5] = saMonthNames[6];
saMonthNames[6] = leapHebrewMonthName;
}
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.AbbrevMonthNames, out this.saAbbrevMonthNames!, ref leapHebrewMonthName);
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.MonthGenitiveNames, out this.saMonthGenitiveNames!, ref leapHebrewMonthName);
result &= EnumMonthNames(localeName, calendarId, CalendarDataType.AbbrevMonthGenitiveNames, out this.saAbbrevMonthGenitiveNames!, ref leapHebrewMonthName);
result &= EnumEraNames(localeName, calendarId, CalendarDataType.EraNames, out this.saEraNames!);
result &= EnumEraNames(localeName, calendarId, CalendarDataType.AbbrevEraNames, out this.saAbbrevEraNames!);
return result;
}
internal static int IcuGetTwoDigitYearMax()
{
Debug.Assert(!GlobalizationMode.UseNls);
// There is no user override for this value on Linux or in ICU.
// So just return -1 to use the hard-coded defaults.
return -1;
}
// Call native side to figure out which calendars are allowed
internal static int IcuGetCalendars(string localeName, CalendarId[] calendars)
{
Debug.Assert(!GlobalizationMode.Invariant);
Debug.Assert(!GlobalizationMode.UseNls);
// NOTE: there are no 'user overrides' on Linux
int count = Interop.Globalization.GetCalendars(localeName, calendars, calendars.Length);
// ensure there is at least 1 calendar returned
if (count == 0 && calendars.Length > 0)
{
calendars[0] = CalendarId.GREGORIAN;
count = 1;
}
return count;
}
private static bool IcuSystemSupportsTaiwaneseCalendar()
{
Debug.Assert(!GlobalizationMode.UseNls);
return true;
}
// PAL Layer ends here
private static unsafe bool GetCalendarInfo(string localeName, CalendarId calendarId, CalendarDataType dataType, out string? calendarString)
{
Debug.Assert(!GlobalizationMode.Invariant);
return Interop.CallStringMethod(
static (buffer, locale, id, type) =>
{
fixed (char* bufferPtr = buffer)
{
return Interop.Globalization.GetCalendarInfo(locale, id, type, bufferPtr, buffer.Length);
}
},
localeName,
calendarId,
dataType,
out calendarString);
}
private static bool EnumDatePatterns(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? datePatterns)
{
datePatterns = null;
IcuEnumCalendarsData callbackContext = default;
callbackContext.Results = new List<string>();
callbackContext.DisallowDuplicates = true;
bool result = EnumCalendarInfo(localeName, calendarId, dataType, ref callbackContext);
if (result)
{
List<string> datePatternsList = callbackContext.Results;
for (int i = 0; i < datePatternsList.Count; i++)
{
datePatternsList[i] = NormalizeDatePattern(datePatternsList[i]);
}
if (dataType == CalendarDataType.ShortDates)
FixDefaultShortDatePattern(datePatternsList);
datePatterns = datePatternsList.ToArray();
}
return result;
}
        // FixDefaultShortDatePattern will convert the default short date pattern from using 'yy' to using 'yyyy'
        // and will ensure the original pattern still exists in the list.
        // Doing that makes the short date pattern format the year as a 4-digit number instead of a 2-digit number.
        // Example: June 5, 2018 will be formatted to something like 6/5/2018 instead of 6/5/18 for the en-US culture.
private static void FixDefaultShortDatePattern(List<string> shortDatePatterns)
{
if (shortDatePatterns.Count == 0)
return;
string s = shortDatePatterns[0];
            // We do not expect any pattern to have a length of more than 100.
            // We have to do this check to prevent a stack overflow, as we allocate the buffer on the stack.
if (s.Length > 100)
return;
Span<char> modifiedPattern = stackalloc char[s.Length + 2];
int index = 0;
while (index < s.Length)
{
if (s[index] == '\'')
{
do
{
modifiedPattern[index] = s[index];
index++;
} while (index < s.Length && s[index] != '\'');
if (index >= s.Length)
return;
}
else if (s[index] == 'y')
{
modifiedPattern[index] = 'y';
break;
}
modifiedPattern[index] = s[index];
index++;
}
if (index >= s.Length - 1 || s[index + 1] != 'y')
{
// not a 'yy' pattern
return;
}
if (index + 2 < s.Length && s[index + 2] == 'y')
{
// we have 'yyy' then nothing to do
return;
}
            // We are now sure that we have a 'yy' pattern
Debug.Assert(index + 3 < modifiedPattern.Length);
modifiedPattern[index + 1] = 'y'; // second y
modifiedPattern[index + 2] = 'y'; // third y
modifiedPattern[index + 3] = 'y'; // fourth y
index += 2;
// Now, copy the rest of the pattern to the destination buffer
while (index < s.Length)
{
modifiedPattern[index + 2] = s[index];
index++;
}
shortDatePatterns[0] = modifiedPattern.ToString();
for (int i = 1; i < shortDatePatterns.Count; i++)
{
if (shortDatePatterns[i] == shortDatePatterns[0])
{
                    // Found a match in the list to the newly constructed pattern, so replace it with the original, unmodified pattern
shortDatePatterns[i] = s;
return;
}
}
            // If we get here, the newly constructed pattern was not found in the list, so add the original pattern
shortDatePatterns.Add(s);
}
/// <summary>
/// The ICU date format characters are not exactly the same as the .NET date format characters.
/// NormalizeDatePattern will take in an ICU date pattern and return the equivalent .NET date pattern.
/// </summary>
/// <remarks>
/// see Date Field Symbol Table in http://userguide.icu-project.org/formatparse/datetime
/// and https://msdn.microsoft.com/en-us/library/8kb3ddd4(v=vs.110).aspx
/// </remarks>
private static string NormalizeDatePattern(string input)
{
var destination = input.Length < 128 ?
new ValueStringBuilder(stackalloc char[128]) :
new ValueStringBuilder(input.Length);
int index = 0;
while (index < input.Length)
{
switch (input[index])
{
case '\'':
// single quotes escape characters, like 'de' in es-SP
// so read verbatim until the next single quote
destination.Append(input[index++]);
while (index < input.Length)
{
char current = input[index++];
destination.Append(current);
if (current == '\'')
{
break;
}
}
break;
case 'E':
case 'e':
case 'c':
// 'E' in ICU is the day of the week, which maps to 3 or 4 'd's in .NET
// 'e' in ICU is the local day of the week, which has no representation in .NET, but
// maps closest to 3 or 4 'd's in .NET
// 'c' in ICU is the stand-alone day of the week, which has no representation in .NET, but
// maps closest to 3 or 4 'd's in .NET
NormalizeDayOfWeek(input, ref destination, ref index);
break;
case 'L':
case 'M':
// 'L' in ICU is the stand-alone name of the month,
// which maps closest to 'M' in .NET since it doesn't support stand-alone month names in patterns
// 'M' in both ICU and .NET is the month,
// but ICU supports 5 'M's, which is the super short month name
int occurrences = CountOccurrences(input, input[index], ref index);
if (occurrences > 4)
{
// 5 'L's or 'M's in ICU is the super short name, which maps closest to MMM in .NET
occurrences = 3;
}
destination.Append('M', occurrences);
break;
case 'G':
// 'G' in ICU is the era, which maps to 'g' in .NET
CountOccurrences(input, 'G', ref index);
// it doesn't matter how many 'G's, since .NET only supports 'g' or 'gg', and they
// have the same meaning
destination.Append('g');
break;
case 'y':
// a single 'y' in ICU is the year with no padding or trimming.
// a single 'y' in .NET is the year with 1 or 2 digits
// so convert any single 'y' to 'yyyy'
occurrences = CountOccurrences(input, 'y', ref index);
if (occurrences == 1)
{
occurrences = 4;
}
destination.Append('y', occurrences);
break;
default:
const string unsupportedDateFieldSymbols = "YuUrQqwWDFg";
Debug.Assert(!unsupportedDateFieldSymbols.Contains(input[index]),
$"Encountered an unexpected date field symbol '{input[index]}' from ICU which has no known corresponding .NET equivalent.");
destination.Append(input[index++]);
break;
}
}
return destination.ToString();
}
private static void NormalizeDayOfWeek(string input, ref ValueStringBuilder destination, ref int index)
{
char dayChar = input[index];
int occurrences = CountOccurrences(input, dayChar, ref index);
occurrences = Math.Max(occurrences, 3);
if (occurrences > 4)
{
                // 5 and 6 E/e/c characters in ICU are the super short names, which map closest to ddd in .NET
occurrences = 3;
}
destination.Append('d', occurrences);
}
private static int CountOccurrences(string input, char value, ref int index)
{
int startIndex = index;
while (index < input.Length && input[index] == value)
{
index++;
}
return index - startIndex;
}
private static bool EnumMonthNames(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? monthNames, ref string? leapHebrewMonthName)
{
monthNames = null;
IcuEnumCalendarsData callbackContext = default;
callbackContext.Results = new List<string>();
bool result = EnumCalendarInfo(localeName, calendarId, dataType, ref callbackContext);
if (result)
{
// the month-name arrays are expected to have 13 elements. If ICU only returns 12, add an
// extra empty string to fill the array.
if (callbackContext.Results.Count == 12)
{
callbackContext.Results.Add(string.Empty);
}
if (callbackContext.Results.Count > 13)
{
Debug.Assert(calendarId == CalendarId.HEBREW && callbackContext.Results.Count == 14);
if (calendarId == CalendarId.HEBREW)
{
leapHebrewMonthName = callbackContext.Results[13];
}
callbackContext.Results.RemoveRange(13, callbackContext.Results.Count - 13);
}
monthNames = callbackContext.Results.ToArray();
}
return result;
}
private static bool EnumEraNames(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? eraNames)
{
bool result = EnumCalendarInfo(localeName, calendarId, dataType, out eraNames);
// .NET expects that only the Japanese calendars have more than 1 era.
// So for other calendars, only return the latest era.
if (calendarId != CalendarId.JAPAN && calendarId != CalendarId.JAPANESELUNISOLAR && eraNames?.Length > 0)
{
string[] latestEraName = new string[] { eraNames![eraNames.Length - 1] };
eraNames = latestEraName;
}
return result;
}
internal static bool EnumCalendarInfo(string localeName, CalendarId calendarId, CalendarDataType dataType, out string[]? calendarData)
{
calendarData = null;
IcuEnumCalendarsData callbackContext = default;
callbackContext.Results = new List<string>();
bool result = EnumCalendarInfo(localeName, calendarId, dataType, ref callbackContext);
if (result)
{
calendarData = callbackContext.Results.ToArray();
}
return result;
}
private static unsafe bool EnumCalendarInfo(string localeName, CalendarId calendarId, CalendarDataType dataType, ref IcuEnumCalendarsData callbackContext)
{
return Interop.Globalization.EnumCalendarInfo(&EnumCalendarInfoCallback, localeName, calendarId, dataType, (IntPtr)Unsafe.AsPointer(ref callbackContext));
}
[UnmanagedCallersOnly]
private static unsafe void EnumCalendarInfoCallback(char* calendarStringPtr, IntPtr context)
{
try
{
ReadOnlySpan<char> calendarStringSpan = MemoryMarshal.CreateReadOnlySpanFromNullTerminated(calendarStringPtr);
ref IcuEnumCalendarsData callbackContext = ref Unsafe.As<byte, IcuEnumCalendarsData>(ref *(byte*)context);
if (callbackContext.DisallowDuplicates)
{
foreach (string existingResult in callbackContext.Results)
{
if (string.CompareOrdinal(calendarStringSpan, existingResult) == 0)
{
// the value is already in the results, so don't add it again
return;
}
}
}
callbackContext.Results.Add(calendarStringSpan.ToString());
}
catch (Exception e)
{
Debug.Fail(e.ToString());
// we ignore the managed exceptions here because EnumCalendarInfoCallback will get called from the native code.
// If we don't ignore the exception here that can cause the runtime to fail fast.
}
}
private struct IcuEnumCalendarsData
{
public List<string> Results;
public bool DisallowDuplicates;
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Drawing.Common/src/System/Drawing/Pen.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.ComponentModel;
using System.Diagnostics;
using System.Drawing.Drawing2D;
using System.Drawing.Internal;
using System.Globalization;
using System.Runtime.InteropServices;
using Gdip = System.Drawing.SafeNativeMethods.Gdip;
namespace System.Drawing
{
/// <summary>
/// Defines an object used to draw lines and curves.
/// </summary>
public sealed partial class Pen : MarshalByRefObject, ICloneable, IDisposable
#pragma warning disable SA1001
#if FEATURE_SYSTEM_EVENTS
, ISystemColorTracker
#endif
#pragma warning restore SA1001
{
#if FINALIZATION_WATCH
private string _allocationSite = Graphics.GetAllocationStack();
#endif
// Handle to native GDI+ pen object.
private IntPtr _nativePen;
// GDI+ doesn't understand system colors, so we need to cache the value here.
private Color _color;
private bool _immutable;
        // Tracks whether the dash style has been changed to something other than Solid during the lifetime of this object.
private bool _dashStyleWasOrIsNotSolid;
/// <summary>
/// Creates a Pen from a native GDI+ object.
/// </summary>
private Pen(IntPtr nativePen) => SetNativePen(nativePen);
internal Pen(Color color, bool immutable) : this(color) => _immutable = immutable;
/// <summary>
/// Initializes a new instance of the Pen class with the specified <see cref='Color'/>.
/// </summary>
public Pen(Color color) : this(color, (float)1.0)
{
}
/// <summary>
/// Initializes a new instance of the <see cref='Pen'/> class with the specified
/// <see cref='Color'/> and <see cref='Width'/>.
/// </summary>
public Pen(Color color, float width)
{
_color = color;
IntPtr pen = IntPtr.Zero;
int status = Gdip.GdipCreatePen1(color.ToArgb(),
width,
(int)GraphicsUnit.World,
out pen);
Gdip.CheckStatus(status);
SetNativePen(pen);
#if FEATURE_SYSTEM_EVENTS
if (_color.IsSystemColor)
{
SystemColorTracker.Add(this);
}
#endif
}
/// <summary>
/// Initializes a new instance of the Pen class with the specified <see cref='Brush'/>.
/// </summary>
public Pen(Brush brush) : this(brush, (float)1.0)
{
}
/// <summary>
/// Initializes a new instance of the <see cref='Pen'/> class with the specified <see cref='Drawing.Brush'/> and width.
/// </summary>
public Pen(Brush brush!!, float width)
{
IntPtr pen;
int status = Gdip.GdipCreatePen2(new HandleRef(brush, brush.NativeBrush),
width,
(int)GraphicsUnit.World,
out pen);
Gdip.CheckStatus(status);
SetNativePen(pen);
}
internal void SetNativePen(IntPtr nativePen)
{
Debug.Assert(nativePen != IntPtr.Zero);
_nativePen = nativePen;
}
[Browsable(false), EditorBrowsable(EditorBrowsableState.Never)]
internal IntPtr NativePen => _nativePen;
/// <summary>
/// Creates an exact copy of this <see cref='System.Drawing.Pen'/>.
/// </summary>
public object Clone()
{
IntPtr clonedPen;
int status = Gdip.GdipClonePen(new HandleRef(this, NativePen), out clonedPen);
Gdip.CheckStatus(status);
return new Pen(clonedPen);
}
/// <summary>
/// Cleans up Windows resources for this <see cref='System.Drawing.Pen'/>.
/// </summary>
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
private void Dispose(bool disposing)
{
#if FINALIZATION_WATCH
            if (!disposing && _nativePen != IntPtr.Zero)
{
Debug.WriteLine("**********************\nDisposed through finalization:\n" + _allocationSite);
}
#endif
if (!disposing)
{
// If we are finalizing, then we will be unreachable soon. Finalize calls dispose to
// release resources, so we must make sure that during finalization we are
// not immutable.
_immutable = false;
}
else if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (_nativePen != IntPtr.Zero)
{
try
{
#if DEBUG
int status = !Gdip.Initialized ? Gdip.Ok :
#endif
Gdip.GdipDeletePen(new HandleRef(this, NativePen));
#if DEBUG
Debug.Assert(status == Gdip.Ok, $"GDI+ returned an error status: {status.ToString(CultureInfo.InvariantCulture)}");
#endif
}
catch (Exception ex) when (!ClientUtils.IsSecurityOrCriticalException(ex))
{
}
finally
{
_nativePen = IntPtr.Zero;
}
}
}
/// <summary>
/// Cleans up Windows resources for this <see cref='System.Drawing.Pen'/>.
/// </summary>
~Pen() => Dispose(false);
/// <summary>
/// Gets or sets the width of this <see cref='System.Drawing.Pen'/>.
/// </summary>
public float Width
{
get
{
var width = new float[] { 0 };
int status = Gdip.GdipGetPenWidth(new HandleRef(this, NativePen), width);
Gdip.CheckStatus(status);
return width[0];
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenWidth(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Sets the values that determine the style of cap used to end lines drawn by this <see cref='Pen'/>.
/// </summary>
public void SetLineCap(LineCap startCap, LineCap endCap, DashCap dashCap)
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenLineCap197819(new HandleRef(this, NativePen),
unchecked((int)startCap), unchecked((int)endCap), unchecked((int)dashCap));
Gdip.CheckStatus(status);
}
/// <summary>
/// Gets or sets the cap style used at the beginning of lines drawn with this <see cref='Pen'/>.
/// </summary>
public LineCap StartCap
{
get
{
int startCap;
int status = Gdip.GdipGetPenStartCap(new HandleRef(this, NativePen), out startCap);
Gdip.CheckStatus(status);
return (LineCap)startCap;
}
set
{
switch (value)
{
case LineCap.Flat:
case LineCap.Square:
case LineCap.Round:
case LineCap.Triangle:
case LineCap.NoAnchor:
case LineCap.SquareAnchor:
case LineCap.RoundAnchor:
case LineCap.DiamondAnchor:
case LineCap.ArrowAnchor:
case LineCap.AnchorMask:
case LineCap.Custom:
break;
default:
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(LineCap));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenStartCap(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the cap style used at the end of lines drawn with this <see cref='Pen'/>.
/// </summary>
public LineCap EndCap
{
get
{
int endCap;
int status = Gdip.GdipGetPenEndCap(new HandleRef(this, NativePen), out endCap);
Gdip.CheckStatus(status);
return (LineCap)endCap;
}
set
{
switch (value)
{
case LineCap.Flat:
case LineCap.Square:
case LineCap.Round:
case LineCap.Triangle:
case LineCap.NoAnchor:
case LineCap.SquareAnchor:
case LineCap.RoundAnchor:
case LineCap.DiamondAnchor:
case LineCap.ArrowAnchor:
case LineCap.AnchorMask:
case LineCap.Custom:
break;
default:
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(LineCap));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenEndCap(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the cap style used at the beginning or end of dashed lines drawn with this <see cref='Pen'/>.
/// </summary>
public DashCap DashCap
{
get
{
int dashCap;
int status = Gdip.GdipGetPenDashCap197819(new HandleRef(this, NativePen), out dashCap);
Gdip.CheckStatus(status);
return (DashCap)dashCap;
}
set
{
if (value != DashCap.Flat && value != DashCap.Round && value != DashCap.Triangle)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(DashCap));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenDashCap197819(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the join style for the ends of two overlapping lines drawn with this <see cref='Pen'/>.
/// </summary>
public LineJoin LineJoin
{
get
{
int lineJoin;
int status = Gdip.GdipGetPenLineJoin(new HandleRef(this, NativePen), out lineJoin);
Gdip.CheckStatus(status);
return (LineJoin)lineJoin;
}
set
{
if (value < LineJoin.Miter || value > LineJoin.MiterClipped)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(LineJoin));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenLineJoin(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the limit of the thickness of the join on a mitered corner.
/// </summary>
public float MiterLimit
{
get
{
var miterLimit = new float[] { 0 };
int status = Gdip.GdipGetPenMiterLimit(new HandleRef(this, NativePen), miterLimit);
Gdip.CheckStatus(status);
return miterLimit[0];
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenMiterLimit(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the alignment for objects drawn with this <see cref='Pen'/>.
/// </summary>
public PenAlignment Alignment
{
get
{
PenAlignment penMode;
int status = Gdip.GdipGetPenMode(new HandleRef(this, NativePen), out penMode);
Gdip.CheckStatus(status);
return penMode;
}
set
{
if (value < PenAlignment.Center || value > PenAlignment.Right)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(PenAlignment));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenMode(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the geometrical transform for objects drawn with this <see cref='Pen'/>.
/// </summary>
public Matrix Transform
{
get
{
var matrix = new Matrix();
int status = Gdip.GdipGetPenTransform(new HandleRef(this, NativePen), new HandleRef(matrix, matrix.NativeMatrix));
Gdip.CheckStatus(status);
return matrix;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value == null)
{
throw new ArgumentNullException(nameof(value));
}
int status = Gdip.GdipSetPenTransform(new HandleRef(this, NativePen), new HandleRef(value, value.NativeMatrix));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Resets the geometric transform for this <see cref='Pen'/> to identity.
/// </summary>
public void ResetTransform()
{
int status = Gdip.GdipResetPenTransform(new HandleRef(this, NativePen));
Gdip.CheckStatus(status);
}
/// <summary>
/// Multiplies the transform matrix for this <see cref='Pen'/> by the specified <see cref='Matrix'/>.
/// </summary>
public void MultiplyTransform(Matrix matrix) => MultiplyTransform(matrix, MatrixOrder.Prepend);
/// <summary>
/// Multiplies the transform matrix for this <see cref='Pen'/> by the specified <see cref='Matrix'/> in the specified order.
/// </summary>
public void MultiplyTransform(Matrix matrix, MatrixOrder order)
{
if (matrix.NativeMatrix == IntPtr.Zero)
{
// Disposed matrices should result in a no-op.
return;
}
int status = Gdip.GdipMultiplyPenTransform(new HandleRef(this, NativePen),
new HandleRef(matrix, matrix.NativeMatrix),
order);
Gdip.CheckStatus(status);
}
/// <summary>
/// Translates the local geometrical transform by the specified dimensions. This method prepends the translation
/// to the transform.
/// </summary>
public void TranslateTransform(float dx, float dy) => TranslateTransform(dx, dy, MatrixOrder.Prepend);
/// <summary>
/// Translates the local geometrical transform by the specified dimensions in the specified order.
/// </summary>
public void TranslateTransform(float dx, float dy, MatrixOrder order)
{
int status = Gdip.GdipTranslatePenTransform(new HandleRef(this, NativePen),
dx, dy, order);
Gdip.CheckStatus(status);
}
/// <summary>
/// Scales the local geometric transform by the specified amounts. This method prepends the scaling matrix to the transform.
/// </summary>
public void ScaleTransform(float sx, float sy) => ScaleTransform(sx, sy, MatrixOrder.Prepend);
/// <summary>
/// Scales the local geometric transform by the specified amounts in the specified order.
/// </summary>
public void ScaleTransform(float sx, float sy, MatrixOrder order)
{
int status = Gdip.GdipScalePenTransform(new HandleRef(this, NativePen),
sx, sy, order);
Gdip.CheckStatus(status);
}
/// <summary>
/// Rotates the local geometric transform by the specified amount. This method prepends the rotation to the transform.
/// </summary>
public void RotateTransform(float angle) => RotateTransform(angle, MatrixOrder.Prepend);
/// <summary>
/// Rotates the local geometric transform by the specified amount in the specified order.
/// </summary>
public void RotateTransform(float angle, MatrixOrder order)
{
int status = Gdip.GdipRotatePenTransform(new HandleRef(this, NativePen),
angle, order);
Gdip.CheckStatus(status);
}
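        // Illustrative usage sketch, an assumption not taken from this file: the transform methods
        // compose like other GDI+ transforms, so a pen's geometry can be scaled and then rotated.
        //
        //     using (var widePen = new Pen(Color.Blue, 5.0f))
        //     {
        //         widePen.ScaleTransform(2.0f, 1.0f); // stretch the pen shape horizontally
        //         widePen.RotateTransform(45.0f); // then prepend a 45-degree rotation
        //     }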
private void InternalSetColor(Color value)
{
int status = Gdip.GdipSetPenColor(new HandleRef(this, NativePen),
_color.ToArgb());
Gdip.CheckStatus(status);
_color = value;
}
/// <summary>
/// Gets the style of lines drawn with this <see cref='Pen'/>.
/// </summary>
public PenType PenType
{
get
{
int type;
int status = Gdip.GdipGetPenFillType(new HandleRef(this, NativePen), out type);
Gdip.CheckStatus(status);
return (PenType)type;
}
}
/// <summary>
/// Gets or sets the color of this <see cref='Pen'/>.
/// </summary>
public Color Color
{
get
{
if (_color == Color.Empty)
{
if (PenType != PenType.SolidColor)
{
throw new ArgumentException(SR.GdiplusInvalidParameter);
}
int colorARGB;
int status = Gdip.GdipGetPenColor(new HandleRef(this, NativePen), out colorARGB);
Gdip.CheckStatus(status);
_color = Color.FromArgb(colorARGB);
}
// GDI+ doesn't understand system colors, so we can't use GdipGetPenColor in the general case.
return _color;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value != _color)
{
#if FEATURE_SYSTEM_EVENTS
Color oldColor = _color;
#endif
_color = value;
InternalSetColor(value);
#if FEATURE_SYSTEM_EVENTS
// NOTE: We never remove pens from the active list, so if someone is
// changing their pen colors a lot, this could be a problem.
if (value.IsSystemColor && !oldColor.IsSystemColor)
{
SystemColorTracker.Add(this);
}
#endif
}
}
}
/// <summary>
/// Gets or sets the <see cref='Drawing.Brush'/> that determines attributes of this <see cref='Pen'/>.
/// </summary>
public Brush Brush
{
get
{
Brush? brush = null;
switch (PenType)
{
case PenType.SolidColor:
brush = new SolidBrush(GetNativeBrush());
break;
case PenType.HatchFill:
brush = new HatchBrush(GetNativeBrush());
break;
case PenType.TextureFill:
brush = new TextureBrush(GetNativeBrush());
break;
case PenType.PathGradient:
brush = new PathGradientBrush(GetNativeBrush());
break;
case PenType.LinearGradient:
brush = new LinearGradientBrush(GetNativeBrush());
break;
default:
break;
}
return brush!;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value == null)
{
throw new ArgumentNullException(nameof(value));
}
int status = Gdip.GdipSetPenBrushFill(new HandleRef(this, NativePen),
new HandleRef(value, value.NativeBrush));
Gdip.CheckStatus(status);
}
}
private IntPtr GetNativeBrush()
{
IntPtr nativeBrush;
int status = Gdip.GdipGetPenBrushFill(new HandleRef(this, NativePen), out nativeBrush);
Gdip.CheckStatus(status);
return nativeBrush;
}
/// <summary>
/// Gets or sets the style used for dashed lines drawn with this <see cref='Pen'/>.
/// </summary>
public DashStyle DashStyle
{
get
{
int dashStyle;
int status = Gdip.GdipGetPenDashStyle(new HandleRef(this, NativePen), out dashStyle);
Gdip.CheckStatus(status);
return (DashStyle)dashStyle;
}
set
{
if (value < DashStyle.Solid || value > DashStyle.Custom)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(DashStyle));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenDashStyle(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
// If we just set the pen style to Custom without defining the custom dash pattern,
// make sure that we can return a valid value.
if (value == DashStyle.Custom)
{
EnsureValidDashPattern();
}
if (value != DashStyle.Solid)
{
this._dashStyleWasOrIsNotSolid = true;
}
}
}
/// <summary>
/// This method is called after the user sets the pen's dash style to custom. Here, we make sure that there
/// is a default value set for the custom pattern.
/// </summary>
private void EnsureValidDashPattern()
{
int retval;
int status = Gdip.GdipGetPenDashCount(new HandleRef(this, NativePen), out retval);
Gdip.CheckStatus(status);
if (retval == 0)
{
// Set to a solid pattern.
DashPattern = new float[] { 1 };
}
}
/// <summary>
/// Gets or sets the distance from the start of a line to the beginning of a dash pattern.
/// </summary>
public float DashOffset
{
get
{
var dashOffset = new float[] { 0 };
int status = Gdip.GdipGetPenDashOffset(new HandleRef(this, NativePen), dashOffset);
Gdip.CheckStatus(status);
return dashOffset[0];
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenDashOffset(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets an array of custom dashes and spaces. The dashes are made up of line segments.
/// </summary>
public float[] DashPattern
{
get
{
int status = Gdip.GdipGetPenDashCount(new HandleRef(this, NativePen), out int count);
Gdip.CheckStatus(status);
float[] pattern;
// don't call GdipGetPenDashArray with a 0 count
if (count > 0)
{
pattern = new float[count];
status = Gdip.GdipGetPenDashArray(new HandleRef(this, NativePen), pattern, count);
Gdip.CheckStatus(status);
}
else if (DashStyle == DashStyle.Solid && !this._dashStyleWasOrIsNotSolid)
{
                    // Most likely we're replicating an existing System.Drawing bug here; it doesn't make much sense to
// ask for a dash pattern when using a solid dash.
throw new OutOfMemoryException();
}
else if (DashStyle == DashStyle.Solid)
{
pattern = Array.Empty<float>();
}
else
{
// special case (not handled inside GDI+)
pattern = new float[1];
pattern[0] = 1.0f;
}
return pattern;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value == null || value.Length == 0)
{
throw new ArgumentException(SR.InvalidDashPattern);
}
foreach (float val in value)
{
if (val <= 0)
{
throw new ArgumentException(SR.InvalidDashPattern);
}
}
int count = value.Length;
IntPtr buf = Marshal.AllocHGlobal(checked(4 * count));
try
{
Marshal.Copy(value, 0, buf, count);
int status = Gdip.GdipSetPenDashArray(new HandleRef(this, NativePen), new HandleRef(buf, buf), count);
Gdip.CheckStatus(status);
}
finally
{
Marshal.FreeHGlobal(buf);
}
}
}
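        // Illustrative usage sketch, an assumption not taken from this file: dash pattern values are
        // expressed in multiples of the pen width, alternating dash lengths and gap lengths.
        //
        //     using (var pen = new Pen(Color.Black, 2.0f))
        //     {
        //         pen.DashStyle = DashStyle.Custom;
        //         pen.DashPattern = new float[] { 4.0f, 2.0f, 1.0f, 2.0f }; // dash, gap, dot, gap
        //     }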
/// <summary>
        /// Gets or sets an array of values that specifies a compound pen. A compound pen draws a compound line made up of parallel lines and spaces.
/// </summary>
public float[] CompoundArray
{
get
{
int count;
int status = Gdip.GdipGetPenCompoundCount(new HandleRef(this, NativePen), out count);
Gdip.CheckStatus(status);
var array = new float[count];
status = Gdip.GdipGetPenCompoundArray(new HandleRef(this, NativePen), array, count);
Gdip.CheckStatus(status);
return array;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value.Length <= 1)
{
throw new ArgumentException(SR.GdiplusInvalidParameter);
}
foreach (float val in value)
{
if (val < 0 || val > 1)
{
throw new ArgumentException(SR.GdiplusInvalidParameter);
}
}
int status = Gdip.GdipSetPenCompoundArray(new HandleRef(this, NativePen), value, value.Length);
Gdip.CheckStatus(status);
}
}
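        // Illustrative usage sketch, an assumption not taken from this file: compound array values are
        // fractions of the pen width, increasing from 0 to 1, that mark where the drawn stripes start
        // and end across the width of the stroke.
        //
        //     using (var pen = new Pen(Color.Black, 10.0f))
        //     {
        //         pen.CompoundArray = new float[] { 0.0f, 0.2f, 0.7f, 1.0f }; // two parallel stripes
        //     }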
#if FEATURE_SYSTEM_EVENTS
void ISystemColorTracker.OnSystemColorChanged()
{
if (NativePen != IntPtr.Zero)
{
InternalSetColor(_color);
}
}
#endif
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.ComponentModel;
using System.Diagnostics;
using System.Drawing.Drawing2D;
using System.Drawing.Internal;
using System.Globalization;
using System.Runtime.InteropServices;
using Gdip = System.Drawing.SafeNativeMethods.Gdip;
namespace System.Drawing
{
/// <summary>
/// Defines an object used to draw lines and curves.
/// </summary>
public sealed partial class Pen : MarshalByRefObject, ICloneable, IDisposable
#pragma warning disable SA1001
#if FEATURE_SYSTEM_EVENTS
, ISystemColorTracker
#endif
#pragma warning restore SA1001
{
#if FINALIZATION_WATCH
private string _allocationSite = Graphics.GetAllocationStack();
#endif
// Handle to native GDI+ pen object.
private IntPtr _nativePen;
// GDI+ doesn't understand system colors, so we need to cache the value here.
private Color _color;
private bool _immutable;
        // Tracks whether the dash style has been changed to something other than Solid during the lifetime of this object.
private bool _dashStyleWasOrIsNotSolid;
/// <summary>
/// Creates a Pen from a native GDI+ object.
/// </summary>
private Pen(IntPtr nativePen) => SetNativePen(nativePen);
internal Pen(Color color, bool immutable) : this(color) => _immutable = immutable;
/// <summary>
/// Initializes a new instance of the Pen class with the specified <see cref='Color'/>.
/// </summary>
public Pen(Color color) : this(color, (float)1.0)
{
}
/// <summary>
/// Initializes a new instance of the <see cref='Pen'/> class with the specified
/// <see cref='Color'/> and <see cref='Width'/>.
/// </summary>
public Pen(Color color, float width)
{
_color = color;
IntPtr pen = IntPtr.Zero;
int status = Gdip.GdipCreatePen1(color.ToArgb(),
width,
(int)GraphicsUnit.World,
out pen);
Gdip.CheckStatus(status);
SetNativePen(pen);
#if FEATURE_SYSTEM_EVENTS
if (_color.IsSystemColor)
{
SystemColorTracker.Add(this);
}
#endif
}
/// <summary>
/// Initializes a new instance of the Pen class with the specified <see cref='Brush'/>.
/// </summary>
public Pen(Brush brush) : this(brush, (float)1.0)
{
}
/// <summary>
/// Initializes a new instance of the <see cref='Pen'/> class with the specified <see cref='Drawing.Brush'/> and width.
/// </summary>
public Pen(Brush brush!!, float width)
{
IntPtr pen;
int status = Gdip.GdipCreatePen2(new HandleRef(brush, brush.NativeBrush),
width,
(int)GraphicsUnit.World,
out pen);
Gdip.CheckStatus(status);
SetNativePen(pen);
}
internal void SetNativePen(IntPtr nativePen)
{
Debug.Assert(nativePen != IntPtr.Zero);
_nativePen = nativePen;
}
[Browsable(false), EditorBrowsable(EditorBrowsableState.Never)]
internal IntPtr NativePen => _nativePen;
/// <summary>
/// Creates an exact copy of this <see cref='System.Drawing.Pen'/>.
/// </summary>
public object Clone()
{
IntPtr clonedPen;
int status = Gdip.GdipClonePen(new HandleRef(this, NativePen), out clonedPen);
Gdip.CheckStatus(status);
return new Pen(clonedPen);
}
/// <summary>
/// Cleans up Windows resources for this <see cref='System.Drawing.Pen'/>.
/// </summary>
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
private void Dispose(bool disposing)
{
#if FINALIZATION_WATCH
            if (!disposing && _nativePen != IntPtr.Zero)
{
Debug.WriteLine("**********************\nDisposed through finalization:\n" + _allocationSite);
}
#endif
if (!disposing)
{
// If we are finalizing, then we will be unreachable soon. Finalize calls dispose to
// release resources, so we must make sure that during finalization we are
// not immutable.
_immutable = false;
}
else if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (_nativePen != IntPtr.Zero)
{
try
{
#if DEBUG
int status = !Gdip.Initialized ? Gdip.Ok :
#endif
Gdip.GdipDeletePen(new HandleRef(this, NativePen));
#if DEBUG
Debug.Assert(status == Gdip.Ok, $"GDI+ returned an error status: {status.ToString(CultureInfo.InvariantCulture)}");
#endif
}
catch (Exception ex) when (!ClientUtils.IsSecurityOrCriticalException(ex))
{
}
finally
{
_nativePen = IntPtr.Zero;
}
}
}
/// <summary>
/// Cleans up Windows resources for this <see cref='System.Drawing.Pen'/>.
/// </summary>
~Pen() => Dispose(false);
/// <summary>
/// Gets or sets the width of this <see cref='System.Drawing.Pen'/>.
/// </summary>
public float Width
{
get
{
var width = new float[] { 0 };
int status = Gdip.GdipGetPenWidth(new HandleRef(this, NativePen), width);
Gdip.CheckStatus(status);
return width[0];
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenWidth(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Sets the values that determine the style of cap used to end lines drawn by this <see cref='Pen'/>.
/// </summary>
public void SetLineCap(LineCap startCap, LineCap endCap, DashCap dashCap)
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenLineCap197819(new HandleRef(this, NativePen),
unchecked((int)startCap), unchecked((int)endCap), unchecked((int)dashCap));
Gdip.CheckStatus(status);
}
/// <summary>
/// Gets or sets the cap style used at the beginning of lines drawn with this <see cref='Pen'/>.
/// </summary>
public LineCap StartCap
{
get
{
int startCap;
int status = Gdip.GdipGetPenStartCap(new HandleRef(this, NativePen), out startCap);
Gdip.CheckStatus(status);
return (LineCap)startCap;
}
set
{
switch (value)
{
case LineCap.Flat:
case LineCap.Square:
case LineCap.Round:
case LineCap.Triangle:
case LineCap.NoAnchor:
case LineCap.SquareAnchor:
case LineCap.RoundAnchor:
case LineCap.DiamondAnchor:
case LineCap.ArrowAnchor:
case LineCap.AnchorMask:
case LineCap.Custom:
break;
default:
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(LineCap));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenStartCap(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the cap style used at the end of lines drawn with this <see cref='Pen'/>.
/// </summary>
public LineCap EndCap
{
get
{
int endCap;
int status = Gdip.GdipGetPenEndCap(new HandleRef(this, NativePen), out endCap);
Gdip.CheckStatus(status);
return (LineCap)endCap;
}
set
{
switch (value)
{
case LineCap.Flat:
case LineCap.Square:
case LineCap.Round:
case LineCap.Triangle:
case LineCap.NoAnchor:
case LineCap.SquareAnchor:
case LineCap.RoundAnchor:
case LineCap.DiamondAnchor:
case LineCap.ArrowAnchor:
case LineCap.AnchorMask:
case LineCap.Custom:
break;
default:
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(LineCap));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenEndCap(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the cap style used at the beginning or end of dashed lines drawn with this <see cref='Pen'/>.
/// </summary>
public DashCap DashCap
{
get
{
int dashCap;
int status = Gdip.GdipGetPenDashCap197819(new HandleRef(this, NativePen), out dashCap);
Gdip.CheckStatus(status);
return (DashCap)dashCap;
}
set
{
if (value != DashCap.Flat && value != DashCap.Round && value != DashCap.Triangle)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(DashCap));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenDashCap197819(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the join style for the ends of two overlapping lines drawn with this <see cref='Pen'/>.
/// </summary>
public LineJoin LineJoin
{
get
{
int lineJoin;
int status = Gdip.GdipGetPenLineJoin(new HandleRef(this, NativePen), out lineJoin);
Gdip.CheckStatus(status);
return (LineJoin)lineJoin;
}
set
{
if (value < LineJoin.Miter || value > LineJoin.MiterClipped)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(LineJoin));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenLineJoin(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the limit of the thickness of the join on a mitered corner.
/// </summary>
public float MiterLimit
{
get
{
var miterLimit = new float[] { 0 };
int status = Gdip.GdipGetPenMiterLimit(new HandleRef(this, NativePen), miterLimit);
Gdip.CheckStatus(status);
return miterLimit[0];
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenMiterLimit(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the alignment for objects drawn with this <see cref='Pen'/>.
/// </summary>
public PenAlignment Alignment
{
get
{
PenAlignment penMode;
int status = Gdip.GdipGetPenMode(new HandleRef(this, NativePen), out penMode);
Gdip.CheckStatus(status);
return penMode;
}
set
{
if (value < PenAlignment.Center || value > PenAlignment.Right)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(PenAlignment));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenMode(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets the geometrical transform for objects drawn with this <see cref='Pen'/>.
/// </summary>
public Matrix Transform
{
get
{
var matrix = new Matrix();
int status = Gdip.GdipGetPenTransform(new HandleRef(this, NativePen), new HandleRef(matrix, matrix.NativeMatrix));
Gdip.CheckStatus(status);
return matrix;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value == null)
{
throw new ArgumentNullException(nameof(value));
}
int status = Gdip.GdipSetPenTransform(new HandleRef(this, NativePen), new HandleRef(value, value.NativeMatrix));
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Resets the geometric transform for this <see cref='Pen'/> to identity.
/// </summary>
public void ResetTransform()
{
int status = Gdip.GdipResetPenTransform(new HandleRef(this, NativePen));
Gdip.CheckStatus(status);
}
/// <summary>
/// Multiplies the transform matrix for this <see cref='Pen'/> by the specified <see cref='Matrix'/>.
/// </summary>
public void MultiplyTransform(Matrix matrix) => MultiplyTransform(matrix, MatrixOrder.Prepend);
/// <summary>
/// Multiplies the transform matrix for this <see cref='Pen'/> by the specified <see cref='Matrix'/> in the specified order.
/// </summary>
public void MultiplyTransform(Matrix matrix, MatrixOrder order)
{
if (matrix.NativeMatrix == IntPtr.Zero)
{
// Disposed matrices should result in a no-op.
return;
}
int status = Gdip.GdipMultiplyPenTransform(new HandleRef(this, NativePen),
new HandleRef(matrix, matrix.NativeMatrix),
order);
Gdip.CheckStatus(status);
}
/// <summary>
/// Translates the local geometrical transform by the specified dimensions. This method prepends the translation
/// to the transform.
/// </summary>
public void TranslateTransform(float dx, float dy) => TranslateTransform(dx, dy, MatrixOrder.Prepend);
/// <summary>
/// Translates the local geometrical transform by the specified dimensions in the specified order.
/// </summary>
public void TranslateTransform(float dx, float dy, MatrixOrder order)
{
int status = Gdip.GdipTranslatePenTransform(new HandleRef(this, NativePen),
dx, dy, order);
Gdip.CheckStatus(status);
}
/// <summary>
/// Scales the local geometric transform by the specified amounts. This method prepends the scaling matrix to the transform.
/// </summary>
public void ScaleTransform(float sx, float sy) => ScaleTransform(sx, sy, MatrixOrder.Prepend);
/// <summary>
/// Scales the local geometric transform by the specified amounts in the specified order.
/// </summary>
public void ScaleTransform(float sx, float sy, MatrixOrder order)
{
int status = Gdip.GdipScalePenTransform(new HandleRef(this, NativePen),
sx, sy, order);
Gdip.CheckStatus(status);
}
/// <summary>
/// Rotates the local geometric transform by the specified amount. This method prepends the rotation to the transform.
/// </summary>
public void RotateTransform(float angle) => RotateTransform(angle, MatrixOrder.Prepend);
/// <summary>
/// Rotates the local geometric transform by the specified amount in the specified order.
/// </summary>
public void RotateTransform(float angle, MatrixOrder order)
{
int status = Gdip.GdipRotatePenTransform(new HandleRef(this, NativePen),
angle, order);
Gdip.CheckStatus(status);
}
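        // Applies the current color to the native GDI+ pen and updates the cached managed value.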
private void InternalSetColor(Color value)
{
int status = Gdip.GdipSetPenColor(new HandleRef(this, NativePen),
_color.ToArgb());
Gdip.CheckStatus(status);
_color = value;
}
/// <summary>
/// Gets the style of lines drawn with this <see cref='Pen'/>.
/// </summary>
public PenType PenType
{
get
{
int type;
int status = Gdip.GdipGetPenFillType(new HandleRef(this, NativePen), out type);
Gdip.CheckStatus(status);
return (PenType)type;
}
}
/// <summary>
/// Gets or sets the color of this <see cref='Pen'/>.
/// </summary>
public Color Color
{
get
{
if (_color == Color.Empty)
{
if (PenType != PenType.SolidColor)
{
throw new ArgumentException(SR.GdiplusInvalidParameter);
}
int colorARGB;
int status = Gdip.GdipGetPenColor(new HandleRef(this, NativePen), out colorARGB);
Gdip.CheckStatus(status);
_color = Color.FromArgb(colorARGB);
}
// GDI+ doesn't understand system colors, so we can't use GdipGetPenColor in the general case.
return _color;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value != _color)
{
#if FEATURE_SYSTEM_EVENTS
Color oldColor = _color;
#endif
_color = value;
InternalSetColor(value);
#if FEATURE_SYSTEM_EVENTS
// NOTE: We never remove pens from the active list, so if someone is
// changing their pen colors a lot, this could be a problem.
if (value.IsSystemColor && !oldColor.IsSystemColor)
{
SystemColorTracker.Add(this);
}
#endif
}
}
}
/// <summary>
/// Gets or sets the <see cref='Drawing.Brush'/> that determines attributes of this <see cref='Pen'/>.
/// </summary>
public Brush Brush
{
get
{
Brush? brush = null;
switch (PenType)
{
case PenType.SolidColor:
brush = new SolidBrush(GetNativeBrush());
break;
case PenType.HatchFill:
brush = new HatchBrush(GetNativeBrush());
break;
case PenType.TextureFill:
brush = new TextureBrush(GetNativeBrush());
break;
case PenType.PathGradient:
brush = new PathGradientBrush(GetNativeBrush());
break;
case PenType.LinearGradient:
brush = new LinearGradientBrush(GetNativeBrush());
break;
default:
break;
}
return brush!;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value == null)
{
throw new ArgumentNullException(nameof(value));
}
int status = Gdip.GdipSetPenBrushFill(new HandleRef(this, NativePen),
new HandleRef(value, value.NativeBrush));
Gdip.CheckStatus(status);
}
}
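        // Returns the native GDI+ brush handle that backs this pen's fill.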
private IntPtr GetNativeBrush()
{
IntPtr nativeBrush;
int status = Gdip.GdipGetPenBrushFill(new HandleRef(this, NativePen), out nativeBrush);
Gdip.CheckStatus(status);
return nativeBrush;
}
/// <summary>
/// Gets or sets the style used for dashed lines drawn with this <see cref='Pen'/>.
/// </summary>
public DashStyle DashStyle
{
get
{
int dashStyle;
int status = Gdip.GdipGetPenDashStyle(new HandleRef(this, NativePen), out dashStyle);
Gdip.CheckStatus(status);
return (DashStyle)dashStyle;
}
set
{
if (value < DashStyle.Solid || value > DashStyle.Custom)
{
throw new InvalidEnumArgumentException(nameof(value), unchecked((int)value), typeof(DashStyle));
}
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenDashStyle(new HandleRef(this, NativePen), unchecked((int)value));
Gdip.CheckStatus(status);
// If we just set the pen style to Custom without defining the custom dash pattern,
// make sure that we can return a valid value.
if (value == DashStyle.Custom)
{
EnsureValidDashPattern();
}
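                // Remember that a non-solid style has been set; the DashPattern getter uses this flag.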
if (value != DashStyle.Solid)
{
this._dashStyleWasOrIsNotSolid = true;
}
}
}
/// <summary>
/// This method is called after the user sets the pen's dash style to custom. Here, we make sure that there
/// is a default value set for the custom pattern.
/// </summary>
private void EnsureValidDashPattern()
{
int retval;
int status = Gdip.GdipGetPenDashCount(new HandleRef(this, NativePen), out retval);
Gdip.CheckStatus(status);
if (retval == 0)
{
// Set to a solid pattern.
DashPattern = new float[] { 1 };
}
}
/// <summary>
/// Gets or sets the distance from the start of a line to the beginning of a dash pattern.
/// </summary>
public float DashOffset
{
get
{
var dashOffset = new float[] { 0 };
int status = Gdip.GdipGetPenDashOffset(new HandleRef(this, NativePen), dashOffset);
Gdip.CheckStatus(status);
return dashOffset[0];
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
int status = Gdip.GdipSetPenDashOffset(new HandleRef(this, NativePen), value);
Gdip.CheckStatus(status);
}
}
/// <summary>
/// Gets or sets an array of custom dashes and spaces. The dashes are made up of line segments.
/// </summary>
public float[] DashPattern
{
get
{
int status = Gdip.GdipGetPenDashCount(new HandleRef(this, NativePen), out int count);
Gdip.CheckStatus(status);
float[] pattern;
// don't call GdipGetPenDashArray with a 0 count
if (count > 0)
{
pattern = new float[count];
status = Gdip.GdipGetPenDashArray(new HandleRef(this, NativePen), pattern, count);
Gdip.CheckStatus(status);
}
else if (DashStyle == DashStyle.Solid && !this._dashStyleWasOrIsNotSolid)
{
                    // Most likely we're replicating an existing System.Drawing bug here; it doesn't make much sense to
// ask for a dash pattern when using a solid dash.
throw new OutOfMemoryException();
}
else if (DashStyle == DashStyle.Solid)
{
pattern = Array.Empty<float>();
}
else
{
// special case (not handled inside GDI+)
pattern = new float[1];
pattern[0] = 1.0f;
}
return pattern;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value == null || value.Length == 0)
{
throw new ArgumentException(SR.InvalidDashPattern);
}
foreach (float val in value)
{
if (val <= 0)
{
throw new ArgumentException(SR.InvalidDashPattern);
}
}
int count = value.Length;
IntPtr buf = Marshal.AllocHGlobal(checked(4 * count));
try
{
Marshal.Copy(value, 0, buf, count);
int status = Gdip.GdipSetPenDashArray(new HandleRef(this, NativePen), new HandleRef(buf, buf), count);
Gdip.CheckStatus(status);
}
finally
{
Marshal.FreeHGlobal(buf);
}
}
}
/// <summary>
/// Gets or sets an array of custom dashes and spaces. The dashes are made up of line segments.
/// </summary>
public float[] CompoundArray
{
get
{
int count;
int status = Gdip.GdipGetPenCompoundCount(new HandleRef(this, NativePen), out count);
Gdip.CheckStatus(status);
var array = new float[count];
status = Gdip.GdipGetPenCompoundArray(new HandleRef(this, NativePen), array, count);
Gdip.CheckStatus(status);
return array;
}
set
{
if (_immutable)
{
throw new ArgumentException(SR.Format(SR.CantChangeImmutableObjects, nameof(Pen)));
}
if (value.Length <= 1)
{
throw new ArgumentException(SR.GdiplusInvalidParameter);
}
foreach (float val in value)
{
if (val < 0 || val > 1)
{
throw new ArgumentException(SR.GdiplusInvalidParameter);
}
}
int status = Gdip.GdipSetPenCompoundArray(new HandleRef(this, NativePen), value, value.Length);
Gdip.CheckStatus(status);
}
}
#if FEATURE_SYSTEM_EVENTS
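        // Invoked when a Windows system color changes; re-applies the cached color to the native pen.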
void ISystemColorTracker.OnSystemColorChanged()
{
if (NativePen != IntPtr.Zero)
{
InternalSetColor(_color);
}
}
#endif
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/Regression/CLR-x86-JIT/v2.1/b610562/b610562.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//This bug exists in whidbey rtm (2.0.50727.42) and has been fixed in Orcas and PUCLR
//When the test fails, the function call test(ref sometype) causes a
//bad image format exception to be thrown. This is due to a problem when a generic
//static member is passed by ref to an interlocked method.
using System;
namespace VTest
{
class TestMain : refTest<TestMain>
{
static int Main(string[] args)
{
int ret = 100;
try
{
new TestMain();
Console.WriteLine("PASS");
}
catch (System.Exception e)
{
Console.WriteLine("FAIL: exception thrown: " + e.Message);
ret = 666;
}
return ret;
}
}
class refTest<type> where type : refTest<type>
{
public refTest()
{
test(ref sometype);
}
public void test(ref type r)
{
System.Threading.Interlocked.CompareExchange(ref r, this as type, null);
}
public static type sometype;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
//This bug exists in whidbey rtm (2.0.50727.42) and has been fixed in Orcas and PUCLR
//When the test fails, the function call test(ref sometype) causes a
//bad image format exception to be thrown. This is due to a problem when a generic
//static member is passed by ref to an interlocked method.
using System;
namespace VTest
{
class TestMain : refTest<TestMain>
{
static int Main(string[] args)
{
int ret = 100;
try
{
new TestMain();
Console.WriteLine("PASS");
}
catch (System.Exception e)
{
Console.WriteLine("FAIL: exception thrown: " + e.Message);
ret = 666;
}
return ret;
}
}
class refTest<type> where type : refTest<type>
{
public refTest()
{
test(ref sometype);
}
public void test(ref type r)
{
System.Threading.Interlocked.CompareExchange(ref r, this as type, null);
}
public static type sometype;
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/installer/tests/HostActivation.Tests/FrameworkResolution/RollForwardMultipleFrameworks.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.DotNet.Cli.Build;
using Microsoft.DotNet.Cli.Build.Framework;
using System;
using Xunit;
namespace Microsoft.DotNet.CoreSetup.Test.HostActivation.FrameworkResolution
{
public class RollForwardMultipleFrameworks :
FrameworkResolutionBase,
IClassFixture<RollForwardMultipleFrameworks.SharedTestState>
{
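        // Names of the mock companion frameworks installed by the shared test state alongside Microsoft.NETCore.App.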
private const string MiddleWare = "MiddleWare";
private const string AnotherMiddleWare = "AnotherMiddleWare";
private const string HighWare = "HighWare";
private SharedTestState SharedState { get; }
public RollForwardMultipleFrameworks(SharedTestState sharedState)
{
SharedState = sharedState;
}
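        // Shared fixture that lays out a mock dotnet install with multiple Microsoft.NETCore.App versions and the companion frameworks above.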
public class SharedTestState : SharedTestStateBase
{
public TestApp FrameworkReferenceApp { get; }
public DotNetCli DotNetWithMultipleFrameworks { get; }
public SharedTestState()
{
DotNetWithMultipleFrameworks = DotNet("WithOneFramework")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.1.1")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.1.3")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.4.1")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.6.0")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.0.0")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.1.0")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.1.1-preview.2")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.1.1-preview.3")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.2.1")
.AddFramework(MiddleWare, "2.1.2", runtimeConfig =>
runtimeConfig.WithFramework(MicrosoftNETCoreApp, "5.1.3"))
.AddFramework(AnotherMiddleWare, "3.0.0", runtimeConfig =>
runtimeConfig.WithFramework(MicrosoftNETCoreApp, "5.1.3"))
.AddFramework(HighWare, "7.3.1", runtimeConfig =>
runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "5.1.3")
.WithFramework(MiddleWare, "2.1.2"))
.Build();
FrameworkReferenceApp = CreateFrameworkReferenceApp();
}
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is higher.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")] // The app reference which is Minor wins
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")] // The app reference which is Minor wins
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToHigher(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(MicrosoftNETCoreApp, "5.1.1"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is higher.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")] // The app reference which is Minor wins
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")] // The app reference which is Minor wins
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToHigher_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is lower.
// Also validates that since all relevant available versions are release,
// the DOTNET_ROLL_FORWARD_TO_PRERELEASE has no effect on the result.
        [Theory] // fxRefVersion        rollForward                          rollForwardToPreRelease  resolvedFramework
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, false, "5.1.3")]
[InlineData("5.4.0", null, false, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, false, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, true, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, false, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, true, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, false, "5.4.1")] // The app's settings (Minor) wins, so effective reference is "5.4.0 minor"
[InlineData("5.4.0", Constants.RollForwardSetting.Major, true, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, false, "5.6.0")] // The app's settings (Minor) wins, so effective reference is "5.4.0 minor highest"
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, true, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, false, "5.4.1")]
[InlineData("5.7.0", Constants.RollForwardSetting.Minor, false, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.Minor, true, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMinor, false, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.Major, false, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMajor, false, ResolvedFramework.NotFound)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, true, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.LatestMajor, false, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToLower(
string versionReference,
string rollForward,
bool rollForwardToPreRelease,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(MicrosoftNETCoreApp, "5.1.1"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference),
rollForwardToPreRelease)
.ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is lower.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("5.7.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMinor, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.Major, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMajor, ResolvedFramework.NotFound)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToLower_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference))
.ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 6.1.1-preview.0 (defaults = RollForward:Minor).
// Also validates the effect of DOTNET_ROLL_FORWARD_TO_PRERELEASE on the result.
        [Theory] // fxRefVersion        rollForward                          rollForwardToPreRelease  resolvedFramework
[InlineData("6.0.0-preview.1", null, false, "6.1.1-preview.2")]
[InlineData("6.0.0", null, false, "6.2.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0-preview.1", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0-preview.1", Constants.RollForwardSetting.Minor, false, "6.1.1-preview.2")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.0.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.1.0-preview.0", null, false, "6.1.1-preview.2")]
[InlineData("6.1.0-preview.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.0", null, false, "6.2.1")]
[InlineData("6.1.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", null, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", null, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, true, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, true, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestPatch, false, "6.2.1")]
[InlineData("6.2.1-preview.1", null, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Major, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
public void ReconcileFrameworkReferences_InnerFrameworkReference_PreRelease(
string versionReference,
string rollForward,
bool rollForwardToPreRelease,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "6.1.1-preview.0")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference),
rollForwardToPreRelease).ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "6.1.1-preview.0");
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 6.1.0 (defaults = RollForward:Minor).
// Also validates the effect of DOTNET_ROLL_FORWARD_TO_PRERELEASE on the result.
        [Theory] // fxRefVersion        rollForward                          rollForwardToPreRelease  resolvedFramework
[InlineData("6.0.0", null, false, "6.1.0")]
[InlineData("6.0.0", null, true, "6.1.1-preview.3")]
[InlineData("6.0.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, false, "6.1.0")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, true, "6.1.1-preview.3")]
[InlineData("6.0.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.1.0", null, false, "6.1.0")]
[InlineData("6.1.0", null, true, "6.1.1-preview.3")]
[InlineData("6.1.1-preview.0", null, false, "6.2.1")]
[InlineData("6.1.1-preview.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", null, false, "6.2.1")]
[InlineData("6.1.1-preview.2", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, true, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, true, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestPatch, false, "6.2.1")]
[InlineData("6.2.1-preview.1", null, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Major, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
public void ReconcileFrameworkReferences_InnerFrameworkReference_Release(
string versionReference,
string rollForward,
bool rollForwardToPreRelease,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "6.1.0")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference),
rollForwardToPreRelease)
.ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "6.1.0");
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is lower.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToLower(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward)),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is lower.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.NotFound)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToLower_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
// Note that in this case (since the app reference is first) if the app's framework reference
// can't be resolved against the available frameworks, the error is actually a regular
// "can't find framework" and not a framework reconcile event.
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is higher.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToHigher(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward)),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is higher.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToHigher_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with another framework's framework reference (<fxRefVersion>, <rollForward>).
// The higher framework has fx reference with higher version.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
public void ReconcileFrameworkReferences_InnerToInnerFrameworkReference_ToLower(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(HighWare, "7.0.0"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1");
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference);
})
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with another framework's framework reference (<fxRefVersion>, <rollForward>).
// The higher framework has fx reference with lower version.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_InnerToInnerFrameworkReference_ToHigher(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(HighWare, "7.0.0"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1");
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference);
})
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// This test:
// - Forces hard resolve of 5.1.1 -> 5.1.3 (direct reference from app)
// - Loads HighWare which has 5.4.1
        // - This forces a retry since 5.1.3 was hard resolved, so we have to reload with 5.4.1 instead
// - Loads MiddleWare which has 5.6.0
// - This forces a retry since by this time 5.4.1 was hard resolved, so we have to reload with 5.6.0 instead
[Fact]
public void FrameworkResolutionRetry_FrameworkChain()
{
RunTest(
runtimeConfig => runtimeConfig
.WithRollForward(Constants.RollForwardSetting.Major)
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(HighWare, "7.3.1"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.4.1");
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.6.0");
})
.Should().Pass()
.And.RestartedFrameworkResolution("5.1.1", "5.4.1")
.And.RestartedFrameworkResolution("5.4.1", "5.6.0")
.And.HaveResolvedFramework(MicrosoftNETCoreApp, "5.6.0");
}
// This test:
// - Forces hard resolve of 5.1.1 -> 5.1.3 (direct reference from app)
// - Loads MiddleWare which has 5.4.1
        // - This forces a retry since 5.1.3 was hard resolved, so we have to reload with 5.4.1 instead
// - Loads AnotherMiddleWare which has 5.6.0
// - This forces a retry since by this time 5.4.1 was hard resolved, so we have to reload with 5.6.0 instead
[Fact]
public void FrameworkResolutionRetry_FrameworkTree()
{
RunTest(
runtimeConfig => runtimeConfig
.WithRollForward(Constants.RollForwardSetting.Major)
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(MiddleWare, "2.1.2")
.WithFramework(AnotherMiddleWare, "3.0.0"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.4.1");
dotnetCustomizer.Framework(AnotherMiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.6.0");
})
.Should().Pass()
.And.RestartedFrameworkResolution("5.1.1", "5.4.1")
.And.RestartedFrameworkResolution("5.4.1", "5.6.0")
.And.HaveResolvedFramework(MicrosoftNETCoreApp, "5.6.0");
}
// Verifies that reconciling framework references correctly remembers whether it should prefer release versions or not.
[Theory]
[InlineData("6.0.0", "6.1.1-preview.0", "6.2.1")] // Release should prefer release even if there's a pre-release in the middle
[InlineData("6.1.0", "6.1.1-preview.0", "6.2.1")] // Release should prefer release even if there's a pre-release in the middle
[InlineData("6.1.1", "6.1.1-preview.0", "6.2.1")] // Release should prefer release even if there's a pre-release in the middle
[InlineData("6.0.0-preview.1", "6.1.1-preview.0", "6.1.1-preview.2")] // Both pre-relelase, take the closest even if it's pre-release
[InlineData("6.1.0-preview.0", "6.1.1", "6.2.1")] // Release should prefer release
[InlineData("6.1.1-preview.0", "6.1.0", "6.2.1")] // Release should prefer release
[InlineData("6.1.1-preview.0", "6.1.1", "6.2.1")] // Release should prefer release
public void PreferReleaseToRelease(string appVersionReference, string frameworkVersionReference, string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.2")
.WithFramework(MicrosoftNETCoreApp, appVersionReference),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = frameworkVersionReference);
})
.ShouldHaveResolvedFramework(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that inner framework reference (<fxRefVersion>, <fxRollForward>)
// is correctly reconciled with app's framework reference (<appRefVersion>, <appRollForward>).
// It then also tests it the other way round (as the result should not depend on which setting comes from FX and which from app)
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
// This is mostly a collection of interesting cases as testing the full matrix is prohibitively large
[Theory] // appRefVersion appRollForward fxRefVersion fxRollForward resolvedFramework
// Disable + anything -> Disable
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.Disable, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.LatestMinor, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.Major, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.LatestMajor, ResolvedFramework.NotFound)]
// Default - should apply normal Minor semantics
[InlineData("5.0.0", null, "5.0.0", null, "5.1.3")]
// Default + LatestPatch -> LatestPatch
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
// Default + LatestMinor -> LatestMinor
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
// Default + Major -> Minor
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
// Default + LatestMajor -> LatestMinor
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
// LatestMinor + Major -> LatestMinor
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.0.0", Constants.RollForwardSetting.Major, "5.6.0")]
// LatestMinor + LatestMajor -> LatestMinor
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
// LatestMajor + Major -> LatestMajor
[InlineData("4.0.0", Constants.RollForwardSetting.LatestMajor, "4.0.0", Constants.RollForwardSetting.Major, "6.2.1")]
// LatestMajor + Minor -> LatestMinor
[InlineData("4.0.0", Constants.RollForwardSetting.LatestMajor, "4.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMajor, "5.0.0", Constants.RollForwardSetting.Minor, "5.6.0")]
// LatestMinor + LatestPatch -> LatestPatch
[InlineData("5.1.0", Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
// LatestMajor + LatestPatch -> LatestPatch
[InlineData("5.1.0", Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMajor, "5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
public void ReconcileFrameworkReferences_MergeRollForward(
string appVersionReference,
string appRollForward,
string fxVersionReference,
string fxRollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, appVersionReference)
.WithRollForward(appRollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(fxRollForward)
.Version = fxVersionReference))
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, fxVersionReference)
.WithRollForward(fxRollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(appRollForward)
.Version = appVersionReference))
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that the "roll to highest version" flag is propagated into inner framework reference.
// The app references MiddleWare framework with the specified appRollForward setting
// then the MiddleWare framework references Microsoft.NETCore.App with the specified fxRefVersion and fxRollForward.
[Theory] // appRollForward fxRefVersion fxRollForward resolvedFramework
// LatestPatch does not imply roll_to_highest
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", null, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.LatestMajor, "6.2.1")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
// Minor/Major do not imply roll_to_highest
[InlineData(Constants.RollForwardSetting.Minor, "5.1.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.Major, "5.1.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.Minor, "6.1.1-preview.2", Constants.RollForwardSetting.Minor, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Minor, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Minor, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Major, "6.1.1-preview.2", Constants.RollForwardSetting.Major, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Major, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Major, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
// LatestMinor does imply roll_to_highest
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", null, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.Major, "6.2.1")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestMajor, "6.2.1")]
// In this case the "roll to highest" should not impact the pre-release search since it should not have any influence on LatestPatch behavior
// which for pre-release versions is to pick the closest match - in this case the exact match exists, so it should pick that one.
[InlineData(Constants.RollForwardSetting.LatestMinor, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
// LatestMajor does imply roll_to_highest
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", null, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.Major, "6.2.1")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestMajor, "6.2.1")]
// In this case the "roll to highest" should not impact the pre-release search since it should not have any influence on LatestPatch behavior
// which for pre-release versions is to pick the closest match - in this case the exact match exists, so it should pick that one.
[InlineData(Constants.RollForwardSetting.LatestMajor, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
public void PropagateRollToHighestVersion(string appRollForward, string fxRefVersion, string fxRollForward, string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MiddleWare, "2.1.0")
.WithRollForward(appRollForward)),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(fxRollForward)
.Version = fxRefVersion))
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that the "roll to highest version" flag is propagated across multiple layers of framework references.
// The app references HighWare framework, which in turn references MiddleWare framework which then references
        // Microsoft.NETCore.App. Each level specifies a roll forward option.
[Theory] // appRollForward fxRefVersion higherFxRollForward lowerFxRollForward resolvedFramework
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.Minor, Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", null, null, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.Minor, Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestPatch, Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.Minor, Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestPatch, Constants.RollForwardSetting.LatestPatch, "5.1.3")]
public void PropagateRollToHighestVersionAcrossMultipleFrameworks(
string appRollForward,
string fxRefVersion,
string higherFxRollForward,
string lowerFxRollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(HighWare, "7.3.1")
.WithRollForward(appRollForward)),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
{
runtimeConfig.RemoveFramework(MicrosoftNETCoreApp);
runtimeConfig.GetFramework(MiddleWare)
.WithRollForward(higherFxRollForward);
});
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(lowerFxRollForward)
.Version = fxRefVersion);
})
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
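        // Shared helper: runs a framework resolution test against the mock dotnet layout and the
        // framework reference app, applying the runtime config customization, the optional dotnet
        // layout customization, and the DOTNET_ROLL_FORWARD_TO_PRERELEASE environment variable.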
private CommandResult RunTest(
Func<RuntimeConfig, RuntimeConfig> runtimeConfig,
Action<DotNetCliExtensions.DotNetCliCustomizer> customizeDotNet = null,
bool rollForwardToPreRelease = false)
{
return RunTest(
SharedState.DotNetWithMultipleFrameworks,
SharedState.FrameworkReferenceApp,
new TestSettings()
.WithRuntimeConfigCustomizer(runtimeConfig)
.WithDotnetCustomizer(customizeDotNet)
.WithEnvironment(Constants.RollForwardToPreRelease.EnvironmentVariable, rollForwardToPreRelease ? "1" : "0"));
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.DotNet.Cli.Build;
using Microsoft.DotNet.Cli.Build.Framework;
using System;
using Xunit;
namespace Microsoft.DotNet.CoreSetup.Test.HostActivation.FrameworkResolution
{
public class RollForwardMultipleFrameworks :
FrameworkResolutionBase,
IClassFixture<RollForwardMultipleFrameworks.SharedTestState>
{
private const string MiddleWare = "MiddleWare";
private const string AnotherMiddleWare = "AnotherMiddleWare";
private const string HighWare = "HighWare";
private SharedTestState SharedState { get; }
public RollForwardMultipleFrameworks(SharedTestState sharedState)
{
SharedState = sharedState;
}
public class SharedTestState : SharedTestStateBase
{
public TestApp FrameworkReferenceApp { get; }
public DotNetCli DotNetWithMultipleFrameworks { get; }
public SharedTestState()
{
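                // Mock dotnet layout shared by all tests: several release versions of Microsoft.NETCore.App
                // (5.1.1, 5.1.3, 5.4.1, 5.6.0, 6.0.0, 6.1.0, 6.2.1), two 6.1.1 pre-release builds, and the
                // MiddleWare, AnotherMiddleWare and HighWare test frameworks that reference Microsoft.NETCore.App.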
DotNetWithMultipleFrameworks = DotNet("WithOneFramework")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.1.1")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.1.3")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.4.1")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("5.6.0")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.0.0")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.1.0")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.1.1-preview.2")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.1.1-preview.3")
.AddMicrosoftNETCoreAppFrameworkMockHostPolicy("6.2.1")
.AddFramework(MiddleWare, "2.1.2", runtimeConfig =>
runtimeConfig.WithFramework(MicrosoftNETCoreApp, "5.1.3"))
.AddFramework(AnotherMiddleWare, "3.0.0", runtimeConfig =>
runtimeConfig.WithFramework(MicrosoftNETCoreApp, "5.1.3"))
.AddFramework(HighWare, "7.3.1", runtimeConfig =>
runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "5.1.3")
.WithFramework(MiddleWare, "2.1.2"))
.Build();
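                // Test app whose framework references are supplied per test through the runtime config customizer.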
FrameworkReferenceApp = CreateFrameworkReferenceApp();
}
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is higher.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")] // The app reference which is Minor wins
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")] // The app reference which is Minor wins
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToHigher(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(MicrosoftNETCoreApp, "5.1.1"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is higher.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")] // The app reference which is Minor wins
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")] // The app reference which is Minor wins
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToHigher_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is lower.
// Also validates that since all relevant available versions are release,
// the DOTNET_ROLL_FORWARD_TO_PRERELEASE has no effect on the result.
        [Theory] // fxRefVersion        rollForward                               rollForwardToPreRelease  resolvedFramework
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, false, "5.1.3")]
[InlineData("5.4.0", null, false, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, false, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, true, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, false, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, true, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, false, "5.4.1")] // The app's settings (Minor) wins, so effective reference is "5.4.0 minor"
[InlineData("5.4.0", Constants.RollForwardSetting.Major, true, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, false, "5.6.0")] // The app's settings (Minor) wins, so effective reference is "5.4.0 minor highest"
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, true, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, false, "5.4.1")]
[InlineData("5.7.0", Constants.RollForwardSetting.Minor, false, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.Minor, true, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMinor, false, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.Major, false, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMajor, false, ResolvedFramework.NotFound)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, true, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.LatestMajor, false, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToLower(
string versionReference,
string rollForward,
bool rollForwardToPreRelease,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(MicrosoftNETCoreApp, "5.1.1"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference),
rollForwardToPreRelease)
.ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 5.1.1 (defaults = RollForward:Minor). App fx reference is lower.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("5.7.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMinor, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.Major, ResolvedFramework.NotFound)]
[InlineData("5.7.0", Constants.RollForwardSetting.LatestMajor, ResolvedFramework.NotFound)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_InnerFrameworkReference_ToLower_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference))
.ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 6.1.1-preview.0 (defaults = RollForward:Minor).
// Also validates the effect of DOTNET_ROLL_FORWARD_TO_PRERELEASE on the result.
        [Theory] // fxRefVersion        rollForward                               rollForwardToPreRelease  resolvedFramework
[InlineData("6.0.0-preview.1", null, false, "6.1.1-preview.2")]
[InlineData("6.0.0", null, false, "6.2.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0-preview.1", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0-preview.1", Constants.RollForwardSetting.Minor, false, "6.1.1-preview.2")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.0.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.1.0-preview.0", null, false, "6.1.1-preview.2")]
[InlineData("6.1.0-preview.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.0", null, false, "6.2.1")]
[InlineData("6.1.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", null, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", null, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, true, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, true, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestPatch, false, "6.2.1")]
[InlineData("6.2.1-preview.1", null, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Major, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
public void ReconcileFrameworkReferences_InnerFrameworkReference_PreRelease(
string versionReference,
string rollForward,
bool rollForwardToPreRelease,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "6.1.1-preview.0")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference),
rollForwardToPreRelease).ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "6.1.1-preview.0");
}
// Verify that inner framework reference (<fxRefVersion>, <rollForward>)
// is correctly reconciled with app's framework reference 6.1.0 (defaults = RollForward:Minor).
// Also validates the effect of DOTNET_ROLL_FORWARD_TO_PRERELEASE on the result.
        [Theory] // fxRefVersion        rollForward                               rollForwardToPreRelease  resolvedFramework
[InlineData("6.0.0", null, false, "6.1.0")]
[InlineData("6.0.0", null, true, "6.1.1-preview.3")]
[InlineData("6.0.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, false, "6.1.0")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, true, "6.1.1-preview.3")]
[InlineData("6.0.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, ResolvedFramework.FailedToReconcile)]
[InlineData("6.1.0", null, false, "6.1.0")]
[InlineData("6.1.0", null, true, "6.1.1-preview.3")]
[InlineData("6.1.1-preview.0", null, false, "6.2.1")]
[InlineData("6.1.1-preview.0", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.1.1-preview.0", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Disable, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, false, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", null, false, "6.2.1")]
[InlineData("6.1.1-preview.2", null, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Minor, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMinor, true, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.Major, true, "6.1.1-preview.2")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
[InlineData("6.1.1-preview.2", Constants.RollForwardSetting.LatestMajor, true, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Disable, false, ResolvedFramework.NotFound)]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestPatch, false, "6.2.1")]
[InlineData("6.2.1-preview.1", null, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Minor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMinor, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.Major, false, "6.2.1")]
[InlineData("6.2.1-preview.1", Constants.RollForwardSetting.LatestMajor, false, "6.2.1")]
public void ReconcileFrameworkReferences_InnerFrameworkReference_Release(
string versionReference,
string rollForward,
bool rollForwardToPreRelease,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MicrosoftNETCoreApp, "6.1.0")
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference),
rollForwardToPreRelease)
.ShouldHaveResolvedFrameworkOrFail(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "6.1.0");
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is lower.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToLower(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward)),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is lower.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.NotFound)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToLower_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
// Note that in this case (since the app reference is first) if the app's framework reference
// can't be resolved against the available frameworks, the error is actually a regular
// "can't find framework" and not a framework reconcile event.
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is higher.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToHigher(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.0")
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward)),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with app's framework reference (<fxRefVersion>, <rollForward>).
// App fx reference is higher.
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_AppFrameworkReference_ToHigher_HardResolve(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, versionReference)
.WithRollForward(rollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1"))
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with another framework's framework reference (<fxRefVersion>, <rollForward>).
// The higher framework has fx reference with higher version.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.0.0", Constants.RollForwardSetting.Disable, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.FailedToReconcile)]
[InlineData("5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", null, "5.1.3")]
[InlineData("5.1.1", null, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.1.1", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.1.1", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("1.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("1.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData("1.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
public void ReconcileFrameworkReferences_InnerToInnerFrameworkReference_ToLower(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(HighWare, "7.0.0"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1");
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference);
})
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, versionReference, "5.1.1");
}
// Verify that inner framework reference 5.1.1 (defaults = RollForward:Minor)
// is correctly reconciled with another framework's framework reference (<fxRefVersion>, <rollForward>).
// The higher framework has fx reference with lower version.
[Theory] // fxRefVersion rollForward resolvedFramework
[InlineData("5.1.3", Constants.RollForwardSetting.Disable, "5.1.3")]
[InlineData("5.4.0", null, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.Minor, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
[InlineData("5.4.0", Constants.RollForwardSetting.Major, "5.4.1")]
[InlineData("5.4.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
[InlineData("5.4.1", Constants.RollForwardSetting.Disable, "5.4.1")]
[InlineData("6.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.FailedToReconcile)]
[InlineData("6.0.0", Constants.RollForwardSetting.Major, ResolvedFramework.FailedToReconcile)]
public void ReconcileFrameworkReferences_InnerToInnerFrameworkReference_ToHigher(
string versionReference,
string rollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(HighWare, "7.0.0"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.1.1");
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(rollForward)
.Version = versionReference);
})
.ShouldHaveResolvedFrameworkOrFailedToReconcileFrameworkReference(
MicrosoftNETCoreApp, resolvedFramework, "5.1.1", versionReference);
}
// This test:
// - Forces hard resolve of 5.1.1 -> 5.1.3 (direct reference from app)
// - Loads HighWare which has 5.4.1
        //    - This forces a retry since 5.1.3 was hard resolved, so we have to reload with 5.4.1 instead
// - Loads MiddleWare which has 5.6.0
// - This forces a retry since by this time 5.4.1 was hard resolved, so we have to reload with 5.6.0 instead
[Fact]
public void FrameworkResolutionRetry_FrameworkChain()
{
RunTest(
runtimeConfig => runtimeConfig
.WithRollForward(Constants.RollForwardSetting.Major)
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(HighWare, "7.3.1"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.4.1");
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.6.0");
})
.Should().Pass()
.And.RestartedFrameworkResolution("5.1.1", "5.4.1")
.And.RestartedFrameworkResolution("5.4.1", "5.6.0")
.And.HaveResolvedFramework(MicrosoftNETCoreApp, "5.6.0");
}
// This test:
// - Forces hard resolve of 5.1.1 -> 5.1.3 (direct reference from app)
// - Loads MiddleWare which has 5.4.1
        //    - This forces a retry since 5.1.3 was hard resolved, so we have to reload with 5.4.1 instead
// - Loads AnotherMiddleWare which has 5.6.0
// - This forces a retry since by this time 5.4.1 was hard resolved, so we have to reload with 5.6.0 instead
[Fact]
public void FrameworkResolutionRetry_FrameworkTree()
{
RunTest(
runtimeConfig => runtimeConfig
.WithRollForward(Constants.RollForwardSetting.Major)
.WithFramework(MicrosoftNETCoreApp, "5.1.1")
.WithFramework(MiddleWare, "2.1.2")
.WithFramework(AnotherMiddleWare, "3.0.0"),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.4.1");
dotnetCustomizer.Framework(AnotherMiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = "5.6.0");
})
.Should().Pass()
.And.RestartedFrameworkResolution("5.1.1", "5.4.1")
.And.RestartedFrameworkResolution("5.4.1", "5.6.0")
.And.HaveResolvedFramework(MicrosoftNETCoreApp, "5.6.0");
}
// Verifies that reconciling framework references correctly remembers whether it should prefer release versions or not.
[Theory]
[InlineData("6.0.0", "6.1.1-preview.0", "6.2.1")] // Release should prefer release even if there's a pre-release in the middle
[InlineData("6.1.0", "6.1.1-preview.0", "6.2.1")] // Release should prefer release even if there's a pre-release in the middle
[InlineData("6.1.1", "6.1.1-preview.0", "6.2.1")] // Release should prefer release even if there's a pre-release in the middle
        [InlineData("6.0.0-preview.1", "6.1.1-preview.0", "6.1.1-preview.2")]  // Both pre-release, take the closest even if it's pre-release
[InlineData("6.1.0-preview.0", "6.1.1", "6.2.1")] // Release should prefer release
[InlineData("6.1.1-preview.0", "6.1.0", "6.2.1")] // Release should prefer release
[InlineData("6.1.1-preview.0", "6.1.1", "6.2.1")] // Release should prefer release
public void PreferReleaseToRelease(string appVersionReference, string frameworkVersionReference, string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(MiddleWare, "2.1.2")
.WithFramework(MicrosoftNETCoreApp, appVersionReference),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.Version = frameworkVersionReference);
})
.ShouldHaveResolvedFramework(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that inner framework reference (<fxRefVersion>, <fxRollForward>)
// is correctly reconciled with app's framework reference (<appRefVersion>, <appRollForward>).
        // It then also tests the same cases the other way round (as the result should not depend on which setting comes from the FX and which from the app)
// In this case the direct reference from app is first, so the framework reference from app
        // is actually resolved against the disk - and the resolved framework is then compared to
// the inner framework reference (potentially causing re-resolution).
// This is mostly a collection of interesting cases as testing the full matrix is prohibitively large
[Theory] // appRefVersion appRollForward fxRefVersion fxRollForward resolvedFramework
// Disable + anything -> Disable
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.Disable, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.LatestMinor, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.Major, ResolvedFramework.NotFound)]
[InlineData("5.1.0", Constants.RollForwardSetting.Disable, "5.1.0", Constants.RollForwardSetting.LatestMajor, ResolvedFramework.NotFound)]
// Default - should apply normal Minor semantics
[InlineData("5.0.0", null, "5.0.0", null, "5.1.3")]
// Default + LatestPatch -> LatestPatch
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
// Default + LatestMinor -> LatestMinor
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.LatestMinor, "5.6.0")]
// Default + Major -> Minor
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.Major, "5.1.3")]
// Default + LatestMajor -> LatestMinor
[InlineData("5.0.0", null, "5.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
// LatestMinor + Major -> LatestMinor
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.0.0", Constants.RollForwardSetting.Major, "5.6.0")]
// LatestMinor + LatestMajor -> LatestMinor
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.0.0", Constants.RollForwardSetting.LatestMajor, "5.6.0")]
// LatestMajor + Major -> LatestMajor
[InlineData("4.0.0", Constants.RollForwardSetting.LatestMajor, "4.0.0", Constants.RollForwardSetting.Major, "6.2.1")]
// LatestMajor + Minor -> LatestMinor
[InlineData("4.0.0", Constants.RollForwardSetting.LatestMajor, "4.0.0", Constants.RollForwardSetting.Minor, ResolvedFramework.NotFound)]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMajor, "5.0.0", Constants.RollForwardSetting.Minor, "5.6.0")]
// LatestMinor + LatestPatch -> LatestPatch
[InlineData("5.1.0", Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMinor, "5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
// LatestMajor + LatestPatch -> LatestPatch
[InlineData("5.1.0", Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData("5.0.0", Constants.RollForwardSetting.LatestMajor, "5.0.0", Constants.RollForwardSetting.LatestPatch, ResolvedFramework.NotFound)]
public void ReconcileFrameworkReferences_MergeRollForward(
string appVersionReference,
string appRollForward,
string fxVersionReference,
string fxRollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, appVersionReference)
.WithRollForward(appRollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(fxRollForward)
.Version = fxVersionReference))
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
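            // Run the same case again with the two framework references swapped between the app and the inner framework - the merged result should be the same either way.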
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MicrosoftNETCoreApp, fxVersionReference)
.WithRollForward(fxRollForward))
.WithFramework(MiddleWare, "2.1.0"),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(appRollForward)
.Version = appVersionReference))
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
        // Verify that the "roll to highest version" flag is propagated into the inner framework reference.
// The app references MiddleWare framework with the specified appRollForward setting
// then the MiddleWare framework references Microsoft.NETCore.App with the specified fxRefVersion and fxRollForward.
[Theory] // appRollForward fxRefVersion fxRollForward resolvedFramework
// LatestPatch does not imply roll_to_highest
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", null, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.Major, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.LatestMajor, "6.2.1")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
// Minor/Major do not imply roll_to_highest
[InlineData(Constants.RollForwardSetting.Minor, "5.1.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.Major, "5.1.0", Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.Minor, "6.1.1-preview.2", Constants.RollForwardSetting.Minor, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Minor, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Minor, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Major, "6.1.1-preview.2", Constants.RollForwardSetting.Major, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Major, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.Major, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
// LatestMinor does imply roll_to_highest
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", null, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.Major, "6.2.1")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestMajor, "6.2.1")]
// In this case the "roll to highest" should not impact the pre-release search since it should not have any influence on LatestPatch behavior
// which for pre-release versions is to pick the closest match - in this case the exact match exists, so it should pick that one.
[InlineData(Constants.RollForwardSetting.LatestMinor, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
// LatestMajor does imply roll_to_highest
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.1", Constants.RollForwardSetting.Disable, "5.1.1")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", null, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.Major, "6.2.1")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestMajor, "6.2.1")]
// In this case the "roll to highest" should not impact the pre-release search since it should not have any influence on LatestPatch behavior
// which for pre-release versions is to pick the closest match - in this case the exact match exists, so it should pick that one.
[InlineData(Constants.RollForwardSetting.LatestMajor, "6.1.1-preview.2", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "6.1.1-preview.1", Constants.RollForwardSetting.LatestPatch, "6.1.1-preview.2")]
public void PropagateRollToHighestVersion(string appRollForward, string fxRefVersion, string fxRollForward, string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(MiddleWare, "2.1.0")
.WithRollForward(appRollForward)),
dotnetCustomizer => dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(fxRollForward)
.Version = fxRefVersion))
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
// Verify that the "roll to highest version" flag is propagated across multiple layers of framework references.
// The app references HighWare framework, which in turn references MiddleWare framework which then references
        // Microsoft.NETCore.App. Each level specifies a roll forward option.
[Theory] // appRollForward fxRefVersion higherFxRollForward lowerFxRollForward resolvedFramework
[InlineData(Constants.RollForwardSetting.LatestPatch, "5.1.0", Constants.RollForwardSetting.Minor, Constants.RollForwardSetting.Minor, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", null, null, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.Minor, Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMinor, "5.1.0", Constants.RollForwardSetting.LatestPatch, Constants.RollForwardSetting.LatestPatch, "5.1.3")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.Minor, Constants.RollForwardSetting.Minor, "5.6.0")]
[InlineData(Constants.RollForwardSetting.LatestMajor, "5.1.0", Constants.RollForwardSetting.LatestPatch, Constants.RollForwardSetting.LatestPatch, "5.1.3")]
public void PropagateRollToHighestVersionAcrossMultipleFrameworks(
string appRollForward,
string fxRefVersion,
string higherFxRollForward,
string lowerFxRollForward,
string resolvedFramework)
{
RunTest(
runtimeConfig => runtimeConfig
.WithFramework(new RuntimeConfig.Framework(HighWare, "7.3.1")
.WithRollForward(appRollForward)),
dotnetCustomizer =>
{
dotnetCustomizer.Framework(HighWare).RuntimeConfig(runtimeConfig =>
{
runtimeConfig.RemoveFramework(MicrosoftNETCoreApp);
runtimeConfig.GetFramework(MiddleWare)
.WithRollForward(higherFxRollForward);
});
dotnetCustomizer.Framework(MiddleWare).RuntimeConfig(runtimeConfig =>
runtimeConfig.GetFramework(MicrosoftNETCoreApp)
.WithRollForward(lowerFxRollForward)
.Version = fxRefVersion);
})
.ShouldHaveResolvedFrameworkOrFailToFind(MicrosoftNETCoreApp, resolvedFramework);
}
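        // Shared helper: runs a framework resolution test against the mock dotnet layout and the
        // framework reference app, applying the runtime config customization, the optional dotnet
        // layout customization, and the DOTNET_ROLL_FORWARD_TO_PRERELEASE environment variable.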
private CommandResult RunTest(
Func<RuntimeConfig, RuntimeConfig> runtimeConfig,
Action<DotNetCliExtensions.DotNetCliCustomizer> customizeDotNet = null,
bool rollForwardToPreRelease = false)
{
return RunTest(
SharedState.DotNetWithMultipleFrameworks,
SharedState.FrameworkReferenceApp,
new TestSettings()
.WithRuntimeConfigCustomizer(runtimeConfig)
.WithDotnetCustomizer(customizeDotNet)
.WithEnvironment(Constants.RollForwardToPreRelease.EnvironmentVariable, rollForwardToPreRelease ? "1" : "0"));
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/jit64/valuetypes/nullable/box-unbox/value/box-unbox-value015.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// <Area> Nullable - Box-Unbox </Area>
// <Title> Nullable type with unbox box expr </Title>
// <Description>
// checking type of UIntPtr using is operator
// </Description>
// <RelatedBugs> </RelatedBugs>
//<Expects Status=success></Expects>
// <Code>
using System.Runtime.InteropServices;
using System;
internal class NullableTest
{
private static bool BoxUnboxToNQ(ValueType o)
{
return Helper.Compare((UIntPtr)o, Helper.Create(default(UIntPtr)));
}
private static bool BoxUnboxToQ(ValueType o)
{
return Helper.Compare((UIntPtr?)o, Helper.Create(default(UIntPtr)));
}
private static int Main()
{
UIntPtr? s = Helper.Create(default(UIntPtr));
if (BoxUnboxToNQ(s) && BoxUnboxToQ(s))
return ExitCode.Passed;
else
return ExitCode.Failed;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// <Area> Nullable - Box-Unbox </Area>
// <Title> Nullable type with unbox box expr </Title>
// <Description>
// checking type of UIntPtr using is operator
// </Description>
// <RelatedBugs> </RelatedBugs>
//<Expects Status=success></Expects>
// <Code>
using System.Runtime.InteropServices;
using System;
internal class NullableTest
{
private static bool BoxUnboxToNQ(ValueType o)
{
return Helper.Compare((UIntPtr)o, Helper.Create(default(UIntPtr)));
}
private static bool BoxUnboxToQ(ValueType o)
{
return Helper.Compare((UIntPtr?)o, Helper.Create(default(UIntPtr)));
}
private static int Main()
{
UIntPtr? s = Helper.Create(default(UIntPtr));
if (BoxUnboxToNQ(s) && BoxUnboxToQ(s))
return ExitCode.Passed;
else
return ExitCode.Failed;
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Net.Primitives/src/System/Net/Sockets/AddressFamily.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Net.Sockets
{
/// <summary>Specifies the addressing scheme that an instance of the Socket class can use.</summary>
public enum AddressFamily
{
Unknown = -1, // Unknown
Unspecified = 0, // Unspecified
Unix = 1, // Local to host (pipes, portals)
InterNetwork = 2, // Internetwork: UDP, TCP, etc.
ImpLink = 3, // ARPAnet imp addresses
Pup = 4, // pup protocols: e.g. BSP
Chaos = 5, // MIT CHAOS protocols
NS = 6, // XEROX NS protocols
Ipx = NS, // IPX and SPX
Iso = 7, // ISO protocols
Osi = Iso, // OSI is ISO
Ecma = 8, // European Computer Manufacturers
DataKit = 9, // DataKit protocols
Ccitt = 10, // CCITT protocols, X.25 etc
Sna = 11, // IBM SNA
DecNet = 12, // DECnet
DataLink = 13, // Direct data link interface
Lat = 14, // LAT
HyperChannel = 15, // NSC Hyperchannel
AppleTalk = 16, // AppleTalk
NetBios = 17, // NetBios-style addresses
VoiceView = 18, // VoiceView
FireFox = 19, // FireFox
Banyan = 21, // Banyan
Atm = 22, // Native ATM Services
InterNetworkV6 = 23, // Internetwork Version 6
Cluster = 24, // Microsoft Wolfpack
Ieee12844 = 25, // IEEE 1284.4 WG AF
Irda = 26, // IrDA
NetworkDesigners = 28, // Network Designers OSI & gateway enabled protocols
Max = 29, // Max
        // Unix-specific values are past UInt16.MaxValue to avoid conflicts with Windows values.
        // On Windows we pass values straight to the OS, and if we add a new protocol supported by Windows,
        // we should use the actual OS value.
Packet = 65536, // Linux Packet
ControllerAreaNetwork = 65537, // Controller Area Network automotive bus protocol
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Net.Sockets
{
/// <summary>Specifies the addressing scheme that an instance of the Socket class can use.</summary>
public enum AddressFamily
{
Unknown = -1, // Unknown
Unspecified = 0, // Unspecified
Unix = 1, // Local to host (pipes, portals)
InterNetwork = 2, // Internetwork: UDP, TCP, etc.
ImpLink = 3, // ARPAnet imp addresses
Pup = 4, // pup protocols: e.g. BSP
Chaos = 5, // MIT CHAOS protocols
NS = 6, // XEROX NS protocols
Ipx = NS, // IPX and SPX
Iso = 7, // ISO protocols
Osi = Iso, // OSI is ISO
Ecma = 8, // European Computer Manufacturers
DataKit = 9, // DataKit protocols
Ccitt = 10, // CCITT protocols, X.25 etc
Sna = 11, // IBM SNA
DecNet = 12, // DECnet
DataLink = 13, // Direct data link interface
Lat = 14, // LAT
HyperChannel = 15, // NSC Hyperchannel
AppleTalk = 16, // AppleTalk
NetBios = 17, // NetBios-style addresses
VoiceView = 18, // VoiceView
FireFox = 19, // FireFox
Banyan = 21, // Banyan
Atm = 22, // Native ATM Services
InterNetworkV6 = 23, // Internetwork Version 6
Cluster = 24, // Microsoft Wolfpack
Ieee12844 = 25, // IEEE 1284.4 WG AF
Irda = 26, // IrDA
NetworkDesigners = 28, // Network Designers OSI & gateway enabled protocols
Max = 29, // Max
        // Unix-specific values are past UInt16.MaxValue to avoid conflicts with Windows values.
        // On Windows we pass values straight to the OS, so if we add a new protocol supported by Windows,
        // we should use the actual OS value.
Packet = 65536, // Linux Packet
ControllerAreaNetwork = 65537, // Controller Area Network automotive bus protocol
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Diagnostics.EventLog/tests/System/Diagnostics/Reader/EventLogPropertySelectorTests.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Diagnostics.Eventing.Reader;
using Xunit;
namespace System.Diagnostics.Tests
{
public class EventLogPropertySelectorTests
{
[ConditionalFact(typeof(Helpers), nameof(Helpers.SupportsEventLogs))]
public void Ctor_NullPropertyQueries_Throws()
{
Assert.Throws<ArgumentNullException>(() => new EventLogPropertySelector(null));
}
[ConditionalFact(typeof(Helpers), nameof(Helpers.SupportsEventLogs))]
public void Ctor_NonEmptyPropertyQuery_Success()
{
IDictionary<string, string> dictionary = new SortedDictionary<string, string>() { ["key"] = "value" };
var selector = new EventLogPropertySelector(dictionary.Keys);
Assert.NotNull(selector);
selector.Dispose();
}
[ConditionalFact(typeof(Helpers), nameof(Helpers.SupportsEventLogs))]
public void GetPropertyValues_MatchProviderIdUsingProviderMetadata_Success()
{
Dictionary<string, Guid> providerNameAndIds = new Dictionary<string, Guid>();
string logName = "Application";
string queryString = "*[System/Level=4]";
var xPathEnum = new List<string>() { "Event/System/EventID", "Event/System/Provider/@Name" };
var logPropertyContext = new EventLogPropertySelector(xPathEnum);
var eventsQuery = new EventLogQuery(logName, PathType.LogName, queryString);
try
{
using (var logReader = new EventLogReader(eventsQuery))
{
for (EventLogRecord eventRecord = (EventLogRecord)logReader.ReadEvent();
eventRecord != null;
eventRecord = (EventLogRecord)logReader.ReadEvent())
{
IList<object> logEventProps;
logEventProps = eventRecord.GetPropertyValues(logPropertyContext);
int eventId;
Assert.True(int.TryParse(string.Format("{0}", logEventProps[0]), out eventId));
string providerName = (string)logEventProps[1];
if (!providerNameAndIds.ContainsKey(providerName) && eventRecord.ProviderId.HasValue)
{
providerNameAndIds.Add(providerName, eventRecord.ProviderId.Value);
}
}
}
}
catch (EventLogNotFoundException) { }
if (providerNameAndIds.Count > 0)
{
using (var session = new EventLogSession())
{
foreach (var nameAndId in providerNameAndIds)
{
ProviderMetadata providerMetadata = null;
try
{
providerMetadata = new ProviderMetadata(nameAndId.Key);
Assert.Equal(providerMetadata.Id, nameAndId.Value);
}
catch (EventLogException)
{
continue;
}
finally
{
providerMetadata?.Dispose();
}
}
}
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Diagnostics.Eventing.Reader;
using Xunit;
namespace System.Diagnostics.Tests
{
public class EventLogPropertySelectorTests
{
[ConditionalFact(typeof(Helpers), nameof(Helpers.SupportsEventLogs))]
public void Ctor_NullPropertyQueries_Throws()
{
Assert.Throws<ArgumentNullException>(() => new EventLogPropertySelector(null));
}
[ConditionalFact(typeof(Helpers), nameof(Helpers.SupportsEventLogs))]
public void Ctor_NonEmptyPropertyQuery_Success()
{
IDictionary<string, string> dictionary = new SortedDictionary<string, string>() { ["key"] = "value" };
var selector = new EventLogPropertySelector(dictionary.Keys);
Assert.NotNull(selector);
selector.Dispose();
}
[ConditionalFact(typeof(Helpers), nameof(Helpers.SupportsEventLogs))]
public void GetPropertyValues_MatchProviderIdUsingProviderMetadata_Success()
{
Dictionary<string, Guid> providerNameAndIds = new Dictionary<string, Guid>();
string logName = "Application";
string queryString = "*[System/Level=4]";
var xPathEnum = new List<string>() { "Event/System/EventID", "Event/System/Provider/@Name" };
var logPropertyContext = new EventLogPropertySelector(xPathEnum);
var eventsQuery = new EventLogQuery(logName, PathType.LogName, queryString);
try
{
using (var logReader = new EventLogReader(eventsQuery))
{
for (EventLogRecord eventRecord = (EventLogRecord)logReader.ReadEvent();
eventRecord != null;
eventRecord = (EventLogRecord)logReader.ReadEvent())
{
IList<object> logEventProps;
logEventProps = eventRecord.GetPropertyValues(logPropertyContext);
int eventId;
Assert.True(int.TryParse(string.Format("{0}", logEventProps[0]), out eventId));
string providerName = (string)logEventProps[1];
if (!providerNameAndIds.ContainsKey(providerName) && eventRecord.ProviderId.HasValue)
{
providerNameAndIds.Add(providerName, eventRecord.ProviderId.Value);
}
}
}
}
catch (EventLogNotFoundException) { }
if (providerNameAndIds.Count > 0)
{
using (var session = new EventLogSession())
{
foreach (var nameAndId in providerNameAndIds)
{
ProviderMetadata providerMetadata = null;
try
{
providerMetadata = new ProviderMetadata(nameAndId.Key);
Assert.Equal(providerMetadata.Id, nameAndId.Value);
}
catch (EventLogException)
{
continue;
}
finally
{
providerMetadata?.Dispose();
}
}
}
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Private.CoreLib/src/System/Buffers/Text/Utf8Formatter/Utf8Formatter.Date.O.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Buffers.Text
{
public static partial class Utf8Formatter
{
//
// Roundtrippable format. One of
//
// 012345678901234567890123456789012
// ---------------------------------
// 2017-06-12T05:30:45.7680000-07:00
// 2017-06-12T05:30:45.7680000Z (Z is short for "+00:00" but also distinguishes DateTimeKind.Utc from DateTimeKind.Local)
        // 2017-06-12T05:30:45.7680000 (interpreted as local time with respect to the current time zone)
//
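        //
        // Illustrative usage sketch (not part of the original file): callers reach this helper through the
        // public Utf8Formatter.TryFormat overload when StandardFormat 'O' is requested, e.g.
        //
        //     Span<byte> utf8 = stackalloc byte[33];
        //     bool ok = Utf8Formatter.TryFormat(DateTime.UtcNow, utf8, out int bytesWritten, new StandardFormat('O'));
        //     // for a UTC value: ok == true, bytesWritten == 28, content like "2017-06-12T05:30:45.7680000Z"
        //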
private static bool TryFormatDateTimeO(DateTime value, TimeSpan offset, Span<byte> destination, out int bytesWritten)
{
const int MinimumBytesNeeded = 27;
int bytesRequired = MinimumBytesNeeded;
DateTimeKind kind = DateTimeKind.Local;
if (offset == Utf8Constants.NullUtcOffset)
{
kind = value.Kind;
if (kind == DateTimeKind.Local)
{
offset = TimeZoneInfo.Local.GetUtcOffset(value);
bytesRequired += 6;
}
else if (kind == DateTimeKind.Utc)
{
bytesRequired++;
}
}
else
{
bytesRequired += 6;
}
if (destination.Length < bytesRequired)
{
bytesWritten = 0;
return false;
}
bytesWritten = bytesRequired;
// Hoist most of the bounds checks on buffer.
{ _ = destination[MinimumBytesNeeded - 1]; }
value.GetDate(out int year, out int month, out int day);
value.GetTimePrecise(out int hour, out int minute, out int second, out int ticks);
FormattingHelpers.WriteFourDecimalDigits((uint)year, destination, 0);
destination[4] = Utf8Constants.Minus;
FormattingHelpers.WriteTwoDecimalDigits((uint)month, destination, 5);
destination[7] = Utf8Constants.Minus;
FormattingHelpers.WriteTwoDecimalDigits((uint)day, destination, 8);
destination[10] = TimeMarker;
FormattingHelpers.WriteTwoDecimalDigits((uint)hour, destination, 11);
destination[13] = Utf8Constants.Colon;
FormattingHelpers.WriteTwoDecimalDigits((uint)minute, destination, 14);
destination[16] = Utf8Constants.Colon;
FormattingHelpers.WriteTwoDecimalDigits((uint)second, destination, 17);
destination[19] = Utf8Constants.Period;
FormattingHelpers.WriteDigits((uint)ticks, destination.Slice(20, 7));
if (kind == DateTimeKind.Local)
{
int offsetTotalMinutes = (int)(offset.Ticks / TimeSpan.TicksPerMinute);
byte sign;
if (offsetTotalMinutes < 0)
{
sign = Utf8Constants.Minus;
offsetTotalMinutes = -offsetTotalMinutes;
}
else
{
sign = Utf8Constants.Plus;
}
int offsetHours = Math.DivRem(offsetTotalMinutes, 60, out int offsetMinutes);
// Writing the value backward allows the JIT to optimize by
// performing a single bounds check against buffer.
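                // (the two-digit write at offset 31 touches the highest indices used here, 31 and 32,
                // so the later writes at offsets 30, 28 and 27 are already known to be in bounds)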
FormattingHelpers.WriteTwoDecimalDigits((uint)offsetMinutes, destination, 31);
destination[30] = Utf8Constants.Colon;
FormattingHelpers.WriteTwoDecimalDigits((uint)offsetHours, destination, 28);
destination[27] = sign;
}
else if (kind == DateTimeKind.Utc)
{
destination[27] = UtcMarker;
}
return true;
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Buffers.Text
{
public static partial class Utf8Formatter
{
//
// Roundtrippable format. One of
//
// 012345678901234567890123456789012
// ---------------------------------
// 2017-06-12T05:30:45.7680000-07:00
// 2017-06-12T05:30:45.7680000Z (Z is short for "+00:00" but also distinguishes DateTimeKind.Utc from DateTimeKind.Local)
        // 2017-06-12T05:30:45.7680000 (interpreted as local time with respect to the current time zone)
//
private static bool TryFormatDateTimeO(DateTime value, TimeSpan offset, Span<byte> destination, out int bytesWritten)
{
const int MinimumBytesNeeded = 27;
int bytesRequired = MinimumBytesNeeded;
DateTimeKind kind = DateTimeKind.Local;
if (offset == Utf8Constants.NullUtcOffset)
{
kind = value.Kind;
if (kind == DateTimeKind.Local)
{
offset = TimeZoneInfo.Local.GetUtcOffset(value);
bytesRequired += 6;
}
else if (kind == DateTimeKind.Utc)
{
bytesRequired++;
}
}
else
{
bytesRequired += 6;
}
if (destination.Length < bytesRequired)
{
bytesWritten = 0;
return false;
}
bytesWritten = bytesRequired;
// Hoist most of the bounds checks on buffer.
{ _ = destination[MinimumBytesNeeded - 1]; }
value.GetDate(out int year, out int month, out int day);
value.GetTimePrecise(out int hour, out int minute, out int second, out int ticks);
FormattingHelpers.WriteFourDecimalDigits((uint)year, destination, 0);
destination[4] = Utf8Constants.Minus;
FormattingHelpers.WriteTwoDecimalDigits((uint)month, destination, 5);
destination[7] = Utf8Constants.Minus;
FormattingHelpers.WriteTwoDecimalDigits((uint)day, destination, 8);
destination[10] = TimeMarker;
FormattingHelpers.WriteTwoDecimalDigits((uint)hour, destination, 11);
destination[13] = Utf8Constants.Colon;
FormattingHelpers.WriteTwoDecimalDigits((uint)minute, destination, 14);
destination[16] = Utf8Constants.Colon;
FormattingHelpers.WriteTwoDecimalDigits((uint)second, destination, 17);
destination[19] = Utf8Constants.Period;
FormattingHelpers.WriteDigits((uint)ticks, destination.Slice(20, 7));
if (kind == DateTimeKind.Local)
{
int offsetTotalMinutes = (int)(offset.Ticks / TimeSpan.TicksPerMinute);
byte sign;
if (offsetTotalMinutes < 0)
{
sign = Utf8Constants.Minus;
offsetTotalMinutes = -offsetTotalMinutes;
}
else
{
sign = Utf8Constants.Plus;
}
int offsetHours = Math.DivRem(offsetTotalMinutes, 60, out int offsetMinutes);
// Writing the value backward allows the JIT to optimize by
// performing a single bounds check against buffer.
FormattingHelpers.WriteTwoDecimalDigits((uint)offsetMinutes, destination, 31);
destination[30] = Utf8Constants.Colon;
FormattingHelpers.WriteTwoDecimalDigits((uint)offsetHours, destination, 28);
destination[27] = sign;
}
else if (kind == DateTimeKind.Utc)
{
destination[27] = UtcMarker;
}
return true;
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/Common/src/System/Text/ValueStringBuilder.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace System.Text
{
internal ref partial struct ValueStringBuilder
{
private char[]? _arrayToReturnToPool;
private Span<char> _chars;
private int _pos;
public ValueStringBuilder(Span<char> initialBuffer)
{
_arrayToReturnToPool = null;
_chars = initialBuffer;
_pos = 0;
}
public ValueStringBuilder(int initialCapacity)
{
_arrayToReturnToPool = ArrayPool<char>.Shared.Rent(initialCapacity);
_chars = _arrayToReturnToPool;
_pos = 0;
}
public int Length
{
get => _pos;
set
{
Debug.Assert(value >= 0);
Debug.Assert(value <= _chars.Length);
_pos = value;
}
}
public int Capacity => _chars.Length;
public void EnsureCapacity(int capacity)
{
            // This is not expected to be called with negative capacity
Debug.Assert(capacity >= 0);
// If the caller has a bug and calls this with negative capacity, make sure to call Grow to throw an exception.
if ((uint)capacity > (uint)_chars.Length)
Grow(capacity - _pos);
}
/// <summary>
/// Get a pinnable reference to the builder.
/// Does not ensure there is a null char after <see cref="Length"/>
/// This overload is pattern matched in the C# 7.3+ compiler so you can omit
/// the explicit method call, and write eg "fixed (char* c = builder)"
/// </summary>
public ref char GetPinnableReference()
{
return ref MemoryMarshal.GetReference(_chars);
}
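        //
        // Illustrative usage sketch (not part of the original file), relying on the pattern-based
        // fixed statement mentioned above (C# 7.3+):
        //
        //     Span<char> initialBuffer = stackalloc char[64];
        //     var vsb = new ValueStringBuilder(initialBuffer);
        //     vsb.Append("hello");
        //     unsafe
        //     {
        //         fixed (char* p = vsb)   // binds to GetPinnableReference()
        //         {
        //             // p points at the first char of the builder's current buffer
        //         }
        //     }
        //     string s = vsb.ToString();  // "hello" (ToString also returns any rented array to the pool)
        //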
/// <summary>
/// Get a pinnable reference to the builder.
/// </summary>
/// <param name="terminate">Ensures that the builder has a null char after <see cref="Length"/></param>
public ref char GetPinnableReference(bool terminate)
{
if (terminate)
{
EnsureCapacity(Length + 1);
_chars[Length] = '\0';
}
return ref MemoryMarshal.GetReference(_chars);
}
public ref char this[int index]
{
get
{
Debug.Assert(index < _pos);
return ref _chars[index];
}
}
public override string ToString()
{
string s = _chars.Slice(0, _pos).ToString();
Dispose();
return s;
}
/// <summary>Returns the underlying storage of the builder.</summary>
public Span<char> RawChars => _chars;
/// <summary>
/// Returns a span around the contents of the builder.
/// </summary>
/// <param name="terminate">Ensures that the builder has a null char after <see cref="Length"/></param>
public ReadOnlySpan<char> AsSpan(bool terminate)
{
if (terminate)
{
EnsureCapacity(Length + 1);
_chars[Length] = '\0';
}
return _chars.Slice(0, _pos);
}
public ReadOnlySpan<char> AsSpan() => _chars.Slice(0, _pos);
public ReadOnlySpan<char> AsSpan(int start) => _chars.Slice(start, _pos - start);
public ReadOnlySpan<char> AsSpan(int start, int length) => _chars.Slice(start, length);
public bool TryCopyTo(Span<char> destination, out int charsWritten)
{
if (_chars.Slice(0, _pos).TryCopyTo(destination))
{
charsWritten = _pos;
Dispose();
return true;
}
else
{
charsWritten = 0;
Dispose();
return false;
}
}
public void Insert(int index, char value, int count)
{
if (_pos > _chars.Length - count)
{
Grow(count);
}
int remaining = _pos - index;
_chars.Slice(index, remaining).CopyTo(_chars.Slice(index + count));
_chars.Slice(index, count).Fill(value);
_pos += count;
}
public void Insert(int index, string? s)
{
if (s == null)
{
return;
}
int count = s.Length;
if (_pos > (_chars.Length - count))
{
Grow(count);
}
int remaining = _pos - index;
_chars.Slice(index, remaining).CopyTo(_chars.Slice(index + count));
s
#if !NETCOREAPP
.AsSpan()
#endif
.CopyTo(_chars.Slice(index));
_pos += count;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Append(char c)
{
int pos = _pos;
if ((uint)pos < (uint)_chars.Length)
{
_chars[pos] = c;
_pos = pos + 1;
}
else
{
GrowAndAppend(c);
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Append(string? s)
{
if (s == null)
{
return;
}
int pos = _pos;
if (s.Length == 1 && (uint)pos < (uint)_chars.Length) // very common case, e.g. appending strings from NumberFormatInfo like separators, percent symbols, etc.
{
_chars[pos] = s[0];
_pos = pos + 1;
}
else
{
AppendSlow(s);
}
}
private void AppendSlow(string s)
{
int pos = _pos;
if (pos > _chars.Length - s.Length)
{
Grow(s.Length);
}
s
#if !NETCOREAPP
.AsSpan()
#endif
.CopyTo(_chars.Slice(pos));
_pos += s.Length;
}
public void Append(char c, int count)
{
if (_pos > _chars.Length - count)
{
Grow(count);
}
Span<char> dst = _chars.Slice(_pos, count);
for (int i = 0; i < dst.Length; i++)
{
dst[i] = c;
}
_pos += count;
}
public unsafe void Append(char* value, int length)
{
int pos = _pos;
if (pos > _chars.Length - length)
{
Grow(length);
}
Span<char> dst = _chars.Slice(_pos, length);
for (int i = 0; i < dst.Length; i++)
{
dst[i] = *value++;
}
_pos += length;
}
public void Append(ReadOnlySpan<char> value)
{
int pos = _pos;
if (pos > _chars.Length - value.Length)
{
Grow(value.Length);
}
value.CopyTo(_chars.Slice(_pos));
_pos += value.Length;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public Span<char> AppendSpan(int length)
{
int origPos = _pos;
if (origPos > _chars.Length - length)
{
Grow(length);
}
_pos = origPos + length;
return _chars.Slice(origPos, length);
}
[MethodImpl(MethodImplOptions.NoInlining)]
private void GrowAndAppend(char c)
{
Grow(1);
Append(c);
}
/// <summary>
/// Resize the internal buffer either by doubling current buffer size or
/// by adding <paramref name="additionalCapacityBeyondPos"/> to
/// <see cref="_pos"/> whichever is greater.
/// </summary>
/// <param name="additionalCapacityBeyondPos">
/// Number of chars requested beyond current position.
/// </param>
[MethodImpl(MethodImplOptions.NoInlining)]
private void Grow(int additionalCapacityBeyondPos)
{
Debug.Assert(additionalCapacityBeyondPos > 0);
Debug.Assert(_pos > _chars.Length - additionalCapacityBeyondPos, "Grow called incorrectly, no resize is needed.");
// Make sure to let Rent throw an exception if the caller has a bug and the desired capacity is negative
char[] poolArray = ArrayPool<char>.Shared.Rent((int)Math.Max((uint)(_pos + additionalCapacityBeyondPos), (uint)_chars.Length * 2));
_chars.Slice(0, _pos).CopyTo(poolArray);
char[]? toReturn = _arrayToReturnToPool;
_chars = _arrayToReturnToPool = poolArray;
if (toReturn != null)
{
ArrayPool<char>.Shared.Return(toReturn);
}
}
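        // Worked example of the policy documented above (illustrative, not part of the original file):
        // with _chars.Length == 64 and _pos == 64, appending a 10-char string calls Grow(10), and
        // Math.Max(64 + 10, 64 * 2) == 128 chars are requested from the pool (ArrayPool may hand back
        // an even larger array).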
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Dispose()
{
char[]? toReturn = _arrayToReturnToPool;
this = default; // for safety, to avoid using pooled array if this instance is erroneously appended to again
if (toReturn != null)
{
ArrayPool<char>.Shared.Return(toReturn);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace System.Text
{
internal ref partial struct ValueStringBuilder
{
private char[]? _arrayToReturnToPool;
private Span<char> _chars;
private int _pos;
public ValueStringBuilder(Span<char> initialBuffer)
{
_arrayToReturnToPool = null;
_chars = initialBuffer;
_pos = 0;
}
public ValueStringBuilder(int initialCapacity)
{
_arrayToReturnToPool = ArrayPool<char>.Shared.Rent(initialCapacity);
_chars = _arrayToReturnToPool;
_pos = 0;
}
public int Length
{
get => _pos;
set
{
Debug.Assert(value >= 0);
Debug.Assert(value <= _chars.Length);
_pos = value;
}
}
public int Capacity => _chars.Length;
public void EnsureCapacity(int capacity)
{
            // This is not expected to be called with negative capacity
Debug.Assert(capacity >= 0);
// If the caller has a bug and calls this with negative capacity, make sure to call Grow to throw an exception.
if ((uint)capacity > (uint)_chars.Length)
Grow(capacity - _pos);
}
/// <summary>
/// Get a pinnable reference to the builder.
/// Does not ensure there is a null char after <see cref="Length"/>
/// This overload is pattern matched in the C# 7.3+ compiler so you can omit
/// the explicit method call, and write eg "fixed (char* c = builder)"
/// </summary>
public ref char GetPinnableReference()
{
return ref MemoryMarshal.GetReference(_chars);
}
/// <summary>
/// Get a pinnable reference to the builder.
/// </summary>
/// <param name="terminate">Ensures that the builder has a null char after <see cref="Length"/></param>
public ref char GetPinnableReference(bool terminate)
{
if (terminate)
{
EnsureCapacity(Length + 1);
_chars[Length] = '\0';
}
return ref MemoryMarshal.GetReference(_chars);
}
public ref char this[int index]
{
get
{
Debug.Assert(index < _pos);
return ref _chars[index];
}
}
public override string ToString()
{
string s = _chars.Slice(0, _pos).ToString();
Dispose();
return s;
}
/// <summary>Returns the underlying storage of the builder.</summary>
public Span<char> RawChars => _chars;
/// <summary>
/// Returns a span around the contents of the builder.
/// </summary>
/// <param name="terminate">Ensures that the builder has a null char after <see cref="Length"/></param>
public ReadOnlySpan<char> AsSpan(bool terminate)
{
if (terminate)
{
EnsureCapacity(Length + 1);
_chars[Length] = '\0';
}
return _chars.Slice(0, _pos);
}
public ReadOnlySpan<char> AsSpan() => _chars.Slice(0, _pos);
public ReadOnlySpan<char> AsSpan(int start) => _chars.Slice(start, _pos - start);
public ReadOnlySpan<char> AsSpan(int start, int length) => _chars.Slice(start, length);
public bool TryCopyTo(Span<char> destination, out int charsWritten)
{
if (_chars.Slice(0, _pos).TryCopyTo(destination))
{
charsWritten = _pos;
Dispose();
return true;
}
else
{
charsWritten = 0;
Dispose();
return false;
}
}
public void Insert(int index, char value, int count)
{
if (_pos > _chars.Length - count)
{
Grow(count);
}
int remaining = _pos - index;
_chars.Slice(index, remaining).CopyTo(_chars.Slice(index + count));
_chars.Slice(index, count).Fill(value);
_pos += count;
}
public void Insert(int index, string? s)
{
if (s == null)
{
return;
}
int count = s.Length;
if (_pos > (_chars.Length - count))
{
Grow(count);
}
int remaining = _pos - index;
_chars.Slice(index, remaining).CopyTo(_chars.Slice(index + count));
s
#if !NETCOREAPP
.AsSpan()
#endif
.CopyTo(_chars.Slice(index));
_pos += count;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Append(char c)
{
int pos = _pos;
if ((uint)pos < (uint)_chars.Length)
{
_chars[pos] = c;
_pos = pos + 1;
}
else
{
GrowAndAppend(c);
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Append(string? s)
{
if (s == null)
{
return;
}
int pos = _pos;
if (s.Length == 1 && (uint)pos < (uint)_chars.Length) // very common case, e.g. appending strings from NumberFormatInfo like separators, percent symbols, etc.
{
_chars[pos] = s[0];
_pos = pos + 1;
}
else
{
AppendSlow(s);
}
}
private void AppendSlow(string s)
{
int pos = _pos;
if (pos > _chars.Length - s.Length)
{
Grow(s.Length);
}
s
#if !NETCOREAPP
.AsSpan()
#endif
.CopyTo(_chars.Slice(pos));
_pos += s.Length;
}
public void Append(char c, int count)
{
if (_pos > _chars.Length - count)
{
Grow(count);
}
Span<char> dst = _chars.Slice(_pos, count);
for (int i = 0; i < dst.Length; i++)
{
dst[i] = c;
}
_pos += count;
}
public unsafe void Append(char* value, int length)
{
int pos = _pos;
if (pos > _chars.Length - length)
{
Grow(length);
}
Span<char> dst = _chars.Slice(_pos, length);
for (int i = 0; i < dst.Length; i++)
{
dst[i] = *value++;
}
_pos += length;
}
public void Append(ReadOnlySpan<char> value)
{
int pos = _pos;
if (pos > _chars.Length - value.Length)
{
Grow(value.Length);
}
value.CopyTo(_chars.Slice(_pos));
_pos += value.Length;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public Span<char> AppendSpan(int length)
{
int origPos = _pos;
if (origPos > _chars.Length - length)
{
Grow(length);
}
_pos = origPos + length;
return _chars.Slice(origPos, length);
}
[MethodImpl(MethodImplOptions.NoInlining)]
private void GrowAndAppend(char c)
{
Grow(1);
Append(c);
}
/// <summary>
/// Resize the internal buffer either by doubling current buffer size or
/// by adding <paramref name="additionalCapacityBeyondPos"/> to
/// <see cref="_pos"/> whichever is greater.
/// </summary>
/// <param name="additionalCapacityBeyondPos">
/// Number of chars requested beyond current position.
/// </param>
[MethodImpl(MethodImplOptions.NoInlining)]
private void Grow(int additionalCapacityBeyondPos)
{
Debug.Assert(additionalCapacityBeyondPos > 0);
Debug.Assert(_pos > _chars.Length - additionalCapacityBeyondPos, "Grow called incorrectly, no resize is needed.");
// Make sure to let Rent throw an exception if the caller has a bug and the desired capacity is negative
char[] poolArray = ArrayPool<char>.Shared.Rent((int)Math.Max((uint)(_pos + additionalCapacityBeyondPos), (uint)_chars.Length * 2));
_chars.Slice(0, _pos).CopyTo(poolArray);
char[]? toReturn = _arrayToReturnToPool;
_chars = _arrayToReturnToPool = poolArray;
if (toReturn != null)
{
ArrayPool<char>.Shared.Return(toReturn);
}
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Dispose()
{
char[]? toReturn = _arrayToReturnToPool;
this = default; // for safety, to avoid using pooled array if this instance is erroneously appended to again
if (toReturn != null)
{
ArrayPool<char>.Shared.Return(toReturn);
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/coreclr/tools/Common/TypeSystem/IL/Stubs/ValueTypeGetFieldHelperMethodOverride.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Internal.TypeSystem;
namespace Internal.IL.Stubs
{
/// <summary>
/// Synthetic method override of "int ValueType.__GetFieldHelper(Int32, out EETypePtr)". This method is injected
/// into all value types that cannot have their Equals(object) and GetHashCode() methods operate on individual
/// bytes. The purpose of the override is to provide access to the value types' fields and their types.
/// </summary>
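    /// <remarks>
    /// A rough sketch of the generated code, reconstructed from <see cref="EmitIL"/> below (illustrative only):
    /// the helper switches on its first argument; case i stores the EETypePtr of field i's (possibly
    /// normalized) type through the out parameter and returns that field's byte offset, computed as the
    /// field address minus the 'this' pointer; an out-of-range index falls through and returns the
    /// field count.
    /// </remarks>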
public sealed partial class ValueTypeGetFieldHelperMethodOverride : ILStubMethod
{
private DefType _owningType;
private MethodSignature _signature;
internal ValueTypeGetFieldHelperMethodOverride(DefType owningType)
{
_owningType = owningType;
}
public override TypeSystemContext Context
{
get
{
return _owningType.Context;
}
}
public override TypeDesc OwningType
{
get
{
return _owningType;
}
}
public override MethodSignature Signature
{
get
{
if (_signature == null)
{
TypeSystemContext context = _owningType.Context;
TypeDesc int32Type = context.GetWellKnownType(WellKnownType.Int32);
TypeDesc eeTypePtrType = context.SystemModule.GetKnownType("System", "EETypePtr");
_signature = new MethodSignature(0, 0, int32Type, new[] {
int32Type,
eeTypePtrType.MakeByRefType()
});
}
return _signature;
}
}
public override MethodIL EmitIL()
{
TypeDesc owningType = _owningType.InstantiateAsOpen();
ILEmitter emitter = new ILEmitter();
TypeDesc eeTypePtrType = Context.SystemModule.GetKnownType("System", "EETypePtr");
MethodDesc eeTypePtrOfMethod = eeTypePtrType.GetKnownMethod("EETypePtrOf", null);
ILToken eeTypePtrToken = emitter.NewToken(eeTypePtrType);
var switchStream = emitter.NewCodeStream();
var getFieldStream = emitter.NewCodeStream();
ArrayBuilder<ILCodeLabel> fieldGetters = new ArrayBuilder<ILCodeLabel>();
foreach (FieldDesc field in owningType.GetFields())
{
if (field.IsStatic)
continue;
ILCodeLabel label = emitter.NewCodeLabel();
fieldGetters.Add(label);
getFieldStream.EmitLabel(label);
getFieldStream.EmitLdArg(2);
// We need something we can instantiate EETypePtrOf over. Also, the classlib
// code doesn't handle pointers.
TypeDesc boxableFieldType = field.FieldType;
if (boxableFieldType.IsPointer || boxableFieldType.IsFunctionPointer)
boxableFieldType = Context.GetWellKnownType(WellKnownType.IntPtr);
// The fact that the type is a reference type is sufficient for the callers.
                // Don't unnecessarily create a MethodTable for the field type.
if (!boxableFieldType.IsSignatureVariable && !boxableFieldType.IsValueType)
boxableFieldType = Context.GetWellKnownType(WellKnownType.Object);
// If this is an enum, it's okay to Equals/GetHashCode the underlying type.
                // Don't unnecessarily create a MethodTable for the enum.
boxableFieldType = boxableFieldType.UnderlyingType;
MethodDesc ptrOfField = eeTypePtrOfMethod.MakeInstantiatedMethod(boxableFieldType);
getFieldStream.Emit(ILOpcode.call, emitter.NewToken(ptrOfField));
getFieldStream.Emit(ILOpcode.stobj, eeTypePtrToken);
getFieldStream.EmitLdArg(0);
getFieldStream.Emit(ILOpcode.ldflda, emitter.NewToken(field));
getFieldStream.EmitLdArg(0);
getFieldStream.Emit(ILOpcode.sub);
getFieldStream.Emit(ILOpcode.ret);
}
if (fieldGetters.Count > 0)
{
switchStream.EmitLdArg(1);
switchStream.EmitSwitch(fieldGetters.ToArray());
}
switchStream.EmitLdc(fieldGetters.Count);
switchStream.Emit(ILOpcode.ret);
return emitter.Link(this);
}
public override Instantiation Instantiation
{
get
{
return Instantiation.Empty;
}
}
public override bool IsVirtual
{
get
{
return true;
}
}
public override string Name
{
get
{
return "__GetFieldHelper";
}
}
public override string DiagnosticName
{
get
{
return "__GetFieldHelper";
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Internal.TypeSystem;
namespace Internal.IL.Stubs
{
/// <summary>
/// Synthetic method override of "int ValueType.__GetFieldHelper(Int32, out EETypePtr)". This method is injected
/// into all value types that cannot have their Equals(object) and GetHashCode() methods operate on individual
/// bytes. The purpose of the override is to provide access to the value types' fields and their types.
/// </summary>
public sealed partial class ValueTypeGetFieldHelperMethodOverride : ILStubMethod
{
private DefType _owningType;
private MethodSignature _signature;
internal ValueTypeGetFieldHelperMethodOverride(DefType owningType)
{
_owningType = owningType;
}
public override TypeSystemContext Context
{
get
{
return _owningType.Context;
}
}
public override TypeDesc OwningType
{
get
{
return _owningType;
}
}
public override MethodSignature Signature
{
get
{
if (_signature == null)
{
TypeSystemContext context = _owningType.Context;
TypeDesc int32Type = context.GetWellKnownType(WellKnownType.Int32);
TypeDesc eeTypePtrType = context.SystemModule.GetKnownType("System", "EETypePtr");
_signature = new MethodSignature(0, 0, int32Type, new[] {
int32Type,
eeTypePtrType.MakeByRefType()
});
}
return _signature;
}
}
public override MethodIL EmitIL()
{
TypeDesc owningType = _owningType.InstantiateAsOpen();
ILEmitter emitter = new ILEmitter();
TypeDesc eeTypePtrType = Context.SystemModule.GetKnownType("System", "EETypePtr");
MethodDesc eeTypePtrOfMethod = eeTypePtrType.GetKnownMethod("EETypePtrOf", null);
ILToken eeTypePtrToken = emitter.NewToken(eeTypePtrType);
var switchStream = emitter.NewCodeStream();
var getFieldStream = emitter.NewCodeStream();
ArrayBuilder<ILCodeLabel> fieldGetters = new ArrayBuilder<ILCodeLabel>();
foreach (FieldDesc field in owningType.GetFields())
{
if (field.IsStatic)
continue;
ILCodeLabel label = emitter.NewCodeLabel();
fieldGetters.Add(label);
getFieldStream.EmitLabel(label);
getFieldStream.EmitLdArg(2);
// We need something we can instantiate EETypePtrOf over. Also, the classlib
// code doesn't handle pointers.
TypeDesc boxableFieldType = field.FieldType;
if (boxableFieldType.IsPointer || boxableFieldType.IsFunctionPointer)
boxableFieldType = Context.GetWellKnownType(WellKnownType.IntPtr);
// The fact that the type is a reference type is sufficient for the callers.
                // Don't unnecessarily create a MethodTable for the field type.
if (!boxableFieldType.IsSignatureVariable && !boxableFieldType.IsValueType)
boxableFieldType = Context.GetWellKnownType(WellKnownType.Object);
// If this is an enum, it's okay to Equals/GetHashCode the underlying type.
                // Don't unnecessarily create a MethodTable for the enum.
boxableFieldType = boxableFieldType.UnderlyingType;
MethodDesc ptrOfField = eeTypePtrOfMethod.MakeInstantiatedMethod(boxableFieldType);
getFieldStream.Emit(ILOpcode.call, emitter.NewToken(ptrOfField));
getFieldStream.Emit(ILOpcode.stobj, eeTypePtrToken);
getFieldStream.EmitLdArg(0);
getFieldStream.Emit(ILOpcode.ldflda, emitter.NewToken(field));
getFieldStream.EmitLdArg(0);
getFieldStream.Emit(ILOpcode.sub);
getFieldStream.Emit(ILOpcode.ret);
}
if (fieldGetters.Count > 0)
{
switchStream.EmitLdArg(1);
switchStream.EmitSwitch(fieldGetters.ToArray());
}
switchStream.EmitLdc(fieldGetters.Count);
switchStream.Emit(ILOpcode.ret);
return emitter.Link(this);
}
public override Instantiation Instantiation
{
get
{
return Instantiation.Empty;
}
}
public override bool IsVirtual
{
get
{
return true;
}
}
public override string Name
{
get
{
return "__GetFieldHelper";
}
}
public override string DiagnosticName
{
get
{
return "__GetFieldHelper";
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/General/Vector128/EqualsAny.Int64.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void EqualsAnyInt64()
{
var test = new VectorBooleanBinaryOpTest__EqualsAnyInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__EqualsAnyInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
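                // Rounds the pointer up to the next multiple of expectedAlignment (assumed to be a
                // power of two), e.g. buffer = 0x1003, expectedAlignment = 8 => (0x1003 + 7) & ~7 = 0x1008.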
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int64> _fld1;
public Vector128<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__EqualsAnyInt64 testClass)
{
var result = Vector128.EqualsAny(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector128<Int64> _clsVar1;
private static Vector128<Int64> _clsVar2;
private Vector128<Int64> _fld1;
private Vector128<Int64> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__EqualsAnyInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
}
public VectorBooleanBinaryOpTest__EqualsAnyInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector128.EqualsAny(
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAny), new Type[] {
typeof(Vector128<Int64>),
typeof(Vector128<Int64>)
});
if (method is null)
{
method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAny), 1, new Type[] {
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int64));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector128.EqualsAny(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr);
var result = Vector128.EqualsAny(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__EqualsAnyInt64();
var result = Vector128.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector128.EqualsAny(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector128.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector128<Int64> op1, Vector128<Int64> op2, bool result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int64>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Int64[] left, Int64[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = false;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult |= (left[i] == right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.EqualsAny)}<Int64>(Vector128<Int64>, Vector128<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void EqualsAnyInt64()
{
var test = new VectorBooleanBinaryOpTest__EqualsAnyInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBooleanBinaryOpTest__EqualsAnyInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private GCHandle inHandle1;
private GCHandle inHandle2;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] inArray2, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int64> _fld1;
public Vector128<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
return testStruct;
}
public void RunStructFldScenario(VectorBooleanBinaryOpTest__EqualsAnyInt64 testClass)
{
var result = Vector128.EqualsAny(_fld1, _fld2);
testClass.ValidateResult(_fld1, _fld2, result);
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector128<Int64> _clsVar1;
private static Vector128<Int64> _clsVar2;
private Vector128<Int64> _fld1;
private Vector128<Int64> _fld2;
private DataTable _dataTable;
static VectorBooleanBinaryOpTest__EqualsAnyInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
}
public VectorBooleanBinaryOpTest__EqualsAnyInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector128.EqualsAny(
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr)
);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAny), new Type[] {
typeof(Vector128<Int64>),
typeof(Vector128<Int64>)
});
if (method is null)
{
method = typeof(Vector128).GetMethod(nameof(Vector128.EqualsAny), 1, new Type[] {
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int64));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr)
});
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (bool)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector128.EqualsAny(
_clsVar1,
_clsVar2
);
ValidateResult(_clsVar1, _clsVar2, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray2Ptr);
var result = Vector128.EqualsAny(op1, op2);
ValidateResult(op1, op2, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBooleanBinaryOpTest__EqualsAnyInt64();
var result = Vector128.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector128.EqualsAny(_fld1, _fld2);
ValidateResult(_fld1, _fld2, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector128.EqualsAny(test._fld1, test._fld2);
ValidateResult(test._fld1, test._fld2, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector128<Int64> op1, Vector128<Int64> op2, bool result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2);
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(void* op1, void* op2, bool result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int64>>());
ValidateResult(inArray1, inArray2, result, method);
}
private void ValidateResult(Int64[] left, Int64[] right, bool result, [CallerMemberName] string method = "")
{
bool succeeded = true;
var expectedResult = false;
for (var i = 0; i < Op1ElementCount; i++)
{
expectedResult |= (left[i] == right[i]);
}
succeeded = (expectedResult == result);
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.EqualsAny)}<Int64>(Vector128<Int64>, Vector128<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({result})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/X86/Sse2/ConvertToInt32WithTruncation.Int32.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
namespace JIT.HardwareIntrinsics.X86
{
public static partial class Program
{
private static void ConvertToInt32WithTruncationInt32Vector128Double()
{
var test = new SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double();
if (test.IsSupported && (true))
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (Sse2.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
// Validates basic functionality works, using LoadAligned
test.RunBasicScenario_LoadAligned();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (Sse2.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
// Validates calling via reflection works, using LoadAligned
test.RunReflectionScenario_LoadAligned();
}
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (Sse2.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
// Validates passing a local works, using LoadAligned
test.RunLclVarScenario_LoadAligned();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double
{
private struct TestStruct
{
public Vector128<Double> _fld;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Double>, byte>(ref testStruct._fld), ref Unsafe.As<Double, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Double>>());
return testStruct;
}
public void RunStructFldScenario(SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double testClass)
{
var result = Sse2.ConvertToInt32WithTruncation(_fld);
testClass.ValidateResult(_fld, result);
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Double>>() / sizeof(Double);
private static Double[] _data = new Double[Op1ElementCount];
private static Vector128<Double> _clsVar;
private Vector128<Double> _fld;
private SimdScalarUnaryOpTest__DataTable<Double> _dataTable;
static SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double()
{
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Double>, byte>(ref _clsVar), ref Unsafe.As<Double, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Double>>());
}
public SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Double>, byte>(ref _fld), ref Unsafe.As<Double, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Double>>());
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
_dataTable = new SimdScalarUnaryOpTest__DataTable<Double>(_data, LargestVectorSize);
}
public bool IsSupported => Sse2.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Sse2.ConvertToInt32WithTruncation(
Unsafe.Read<Vector128<Double>>(_dataTable.inArrayPtr)
);
ValidateResult(_dataTable.inArrayPtr, result);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = Sse2.ConvertToInt32WithTruncation(
Sse2.LoadVector128((Double*)(_dataTable.inArrayPtr))
);
ValidateResult(_dataTable.inArrayPtr, result);
}
public void RunBasicScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned));
var result = Sse2.ConvertToInt32WithTruncation(
Sse2.LoadAlignedVector128((Double*)(_dataTable.inArrayPtr))
);
ValidateResult(_dataTable.inArrayPtr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Sse2).GetMethod(nameof(Sse2.ConvertToInt32WithTruncation), new Type[] { typeof(Vector128<Double>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Double>>(_dataTable.inArrayPtr)
});
ValidateResult(_dataTable.inArrayPtr, (Int32)(result));
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(Sse2).GetMethod(nameof(Sse2.ConvertToInt32WithTruncation), new Type[] { typeof(Vector128<Double>) })
.Invoke(null, new object[] {
Sse2.LoadVector128((Double*)(_dataTable.inArrayPtr))
});
ValidateResult(_dataTable.inArrayPtr, (Int32)(result));
}
public void RunReflectionScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned));
var result = typeof(Sse2).GetMethod(nameof(Sse2.ConvertToInt32WithTruncation), new Type[] { typeof(Vector128<Double>) })
.Invoke(null, new object[] {
Sse2.LoadAlignedVector128((Double*)(_dataTable.inArrayPtr))
});
ValidateResult(_dataTable.inArrayPtr, (Int32)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Sse2.ConvertToInt32WithTruncation(
_clsVar
);
ValidateResult(_clsVar, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var firstOp = Unsafe.Read<Vector128<Double>>(_dataTable.inArrayPtr);
var result = Sse2.ConvertToInt32WithTruncation(firstOp);
ValidateResult(firstOp, result);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var firstOp = Sse2.LoadVector128((Double*)(_dataTable.inArrayPtr));
var result = Sse2.ConvertToInt32WithTruncation(firstOp);
ValidateResult(firstOp, result);
}
public void RunLclVarScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned));
var firstOp = Sse2.LoadAlignedVector128((Double*)(_dataTable.inArrayPtr));
var result = Sse2.ConvertToInt32WithTruncation(firstOp);
ValidateResult(firstOp, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double();
var result = Sse2.ConvertToInt32WithTruncation(test._fld);
ValidateResult(test._fld, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Sse2.ConvertToInt32WithTruncation(_fld);
ValidateResult(_fld, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Sse2.ConvertToInt32WithTruncation(test._fld);
ValidateResult(test._fld, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
Succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
Succeeded = true;
}
}
private void ValidateResult(Vector128<Double> firstOp, Int32 result, [CallerMemberName] string method = "")
{
Double[] inArray = new Double[Op1ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray[0]), firstOp);
ValidateResult(inArray, result, method);
}
private void ValidateResult(void* firstOp, Int32 result, [CallerMemberName] string method = "")
{
Double[] inArray = new Double[Op1ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Double>>());
ValidateResult(inArray, result, method);
}
private void ValidateResult(Double[] firstOp, Int32 result, [CallerMemberName] string method = "")
{
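// Illustrative note (not part of the original template): Sse2.ConvertToInt32WithTruncation
// lowers to CVTTSD2SI, which truncates toward zero, and the C# cast (int)firstOp[0] below
// truncates the same way, so e.g. a hypothetical input of -1.9 should yield -1 from both.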
if ((int) firstOp[0] != result)
{
Succeeded = false;
}
if (!Succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Sse2)}.{nameof(Sse2.ConvertToInt32WithTruncation)}<Int32>(Vector128<Double>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: result");
TestLibrary.TestFramework.LogInformation(string.Empty);
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
namespace JIT.HardwareIntrinsics.X86
{
public static partial class Program
{
private static void ConvertToInt32WithTruncationInt32Vector128Double()
{
var test = new SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double();
if (test.IsSupported && (true))
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (Sse2.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
// Validates basic functionality works, using LoadAligned
test.RunBasicScenario_LoadAligned();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (Sse2.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
// Validates calling via reflection works, using LoadAligned
test.RunReflectionScenario_LoadAligned();
}
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (Sse2.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
// Validates passing a local works, using LoadAligned
test.RunLclVarScenario_LoadAligned();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double
{
private struct TestStruct
{
public Vector128<Double> _fld;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Double>, byte>(ref testStruct._fld), ref Unsafe.As<Double, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Double>>());
return testStruct;
}
public void RunStructFldScenario(SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double testClass)
{
var result = Sse2.ConvertToInt32WithTruncation(_fld);
testClass.ValidateResult(_fld, result);
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Double>>() / sizeof(Double);
private static Double[] _data = new Double[Op1ElementCount];
private static Vector128<Double> _clsVar;
private Vector128<Double> _fld;
private SimdScalarUnaryOpTest__DataTable<Double> _dataTable;
static SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double()
{
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Double>, byte>(ref _clsVar), ref Unsafe.As<Double, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Double>>());
}
public SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Double>, byte>(ref _fld), ref Unsafe.As<Double, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<Double>>());
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetDouble(); }
_dataTable = new SimdScalarUnaryOpTest__DataTable<Double>(_data, LargestVectorSize);
}
public bool IsSupported => Sse2.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Sse2.ConvertToInt32WithTruncation(
Unsafe.Read<Vector128<Double>>(_dataTable.inArrayPtr)
);
ValidateResult(_dataTable.inArrayPtr, result);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = Sse2.ConvertToInt32WithTruncation(
Sse2.LoadVector128((Double*)(_dataTable.inArrayPtr))
);
ValidateResult(_dataTable.inArrayPtr, result);
}
public void RunBasicScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned));
var result = Sse2.ConvertToInt32WithTruncation(
Sse2.LoadAlignedVector128((Double*)(_dataTable.inArrayPtr))
);
ValidateResult(_dataTable.inArrayPtr, result);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(Sse2).GetMethod(nameof(Sse2.ConvertToInt32WithTruncation), new Type[] { typeof(Vector128<Double>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Double>>(_dataTable.inArrayPtr)
});
ValidateResult(_dataTable.inArrayPtr, (Int32)(result));
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(Sse2).GetMethod(nameof(Sse2.ConvertToInt32WithTruncation), new Type[] { typeof(Vector128<Double>) })
.Invoke(null, new object[] {
Sse2.LoadVector128((Double*)(_dataTable.inArrayPtr))
});
ValidateResult(_dataTable.inArrayPtr, (Int32)(result));
}
public void RunReflectionScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned));
var result = typeof(Sse2).GetMethod(nameof(Sse2.ConvertToInt32WithTruncation), new Type[] { typeof(Vector128<Double>) })
.Invoke(null, new object[] {
Sse2.LoadAlignedVector128((Double*)(_dataTable.inArrayPtr))
});
ValidateResult(_dataTable.inArrayPtr, (Int32)(result));
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Sse2.ConvertToInt32WithTruncation(
_clsVar
);
ValidateResult(_clsVar, result);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var firstOp = Unsafe.Read<Vector128<Double>>(_dataTable.inArrayPtr);
var result = Sse2.ConvertToInt32WithTruncation(firstOp);
ValidateResult(firstOp, result);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var firstOp = Sse2.LoadVector128((Double*)(_dataTable.inArrayPtr));
var result = Sse2.ConvertToInt32WithTruncation(firstOp);
ValidateResult(firstOp, result);
}
public void RunLclVarScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned));
var firstOp = Sse2.LoadAlignedVector128((Double*)(_dataTable.inArrayPtr));
var result = Sse2.ConvertToInt32WithTruncation(firstOp);
ValidateResult(firstOp, result);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimdScalarUnaryOpConvertTest__ConvertToInt32WithTruncationInt32Vector128Double();
var result = Sse2.ConvertToInt32WithTruncation(test._fld);
ValidateResult(test._fld, result);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Sse2.ConvertToInt32WithTruncation(_fld);
ValidateResult(_fld, result);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Sse2.ConvertToInt32WithTruncation(test._fld);
ValidateResult(test._fld, result);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
Succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
Succeeded = true;
}
}
private void ValidateResult(Vector128<Double> firstOp, Int32 result, [CallerMemberName] string method = "")
{
Double[] inArray = new Double[Op1ElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Double, byte>(ref inArray[0]), firstOp);
ValidateResult(inArray, result, method);
}
private void ValidateResult(void* firstOp, Int32 result, [CallerMemberName] string method = "")
{
Double[] inArray = new Double[Op1ElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Double, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<Double>>());
ValidateResult(inArray, result, method);
}
private void ValidateResult(Double[] firstOp, Int32 result, [CallerMemberName] string method = "")
{
if ((int) firstOp[0] != result)
{
Succeeded = false;
}
if (!Succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Sse2)}.{nameof(Sse2.ConvertToInt32WithTruncation)}<Int32>(Vector128<Double>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: result");
TestLibrary.TestFramework.LogInformation(string.Empty);
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/jit64/valuetypes/nullable/box-unbox/value/box-unbox-value014.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="box-unbox-value014.cs" />
<Compile Include="..\structdef.cs" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="box-unbox-value014.cs" />
<Compile Include="..\structdef.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/mono/mono/utils/mono-proclib.c | /**
* \file
* Copyright 2008-2011 Novell Inc
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "config.h"
#include "utils/mono-proclib.h"
#include "utils/mono-time.h"
#include "utils/mono-errno.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SCHED_GETAFFINITY
#include <sched.h>
#endif
#include <utils/mono-mmap.h>
#include <utils/strenc-internals.h>
#include <utils/strenc.h>
#include <utils/mono-error-internals.h>
#include <utils/mono-logger-internals.h>
#if defined(_POSIX_VERSION)
#ifdef HAVE_SYS_ERRNO_H
#include <sys/errno.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include <errno.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#endif
#if defined(__HAIKU__)
#include <os/kernel/OS.h>
#endif
#if defined(_AIX)
#include <procinfo.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
#include <sys/proc.h>
#if defined(__APPLE__)
#include <mach/mach.h>
#endif
#ifdef HAVE_SYS_USER_H
#include <sys/user.h>
#endif
#ifdef HAVE_STRUCT_KINFO_PROC_KP_PROC
# define kinfo_starttime_member kp_proc.p_starttime
# define kinfo_pid_member kp_proc.p_pid
# define kinfo_name_member kp_proc.p_comm
#elif defined(__NetBSD__)
# define kinfo_starttime_member p_ustart_sec
# define kinfo_pid_member p_pid
# define kinfo_name_member p_comm
#elif defined(__OpenBSD__)
// Can not figure out how to get the proc's start time on OpenBSD
# undef kinfo_starttime_member
# define kinfo_pid_member p_pid
# define kinfo_name_member p_comm
#else
#define kinfo_starttime_member ki_start
#define kinfo_pid_member ki_pid
#define kinfo_name_member ki_comm
#endif
#define USE_SYSCTL 1
#endif
#ifdef HAVE_SCHED_GETAFFINITY
# ifndef HAVE_GNU_CPU_COUNT
static int
CPU_COUNT(cpu_set_t *set)
{
int count = 0;
for (int i = 0; i < CPU_SETSIZE; i++)
if (CPU_ISSET(i, set))
count++;
return count;
}
# endif
#endif
/**
* mono_process_list:
* \param size a pointer to a location where the size of the returned array is stored
* \returns an array of pid values for the processes currently running on the system.
* The size of the array is stored in \p size.
*/
gpointer*
mono_process_list (int *size)
{
#if USE_SYSCTL
int res, i;
#ifdef KERN_PROC2
int mib [6];
size_t data_len = sizeof (struct kinfo_proc2) * 400;
struct kinfo_proc2 *processes = g_malloc (data_len);
#else
int mib [4];
size_t data_len = sizeof (struct kinfo_proc) * 16;
struct kinfo_proc *processes;
int limit = 8;
#endif /* KERN_PROC2 */
void **buf = NULL;
if (size)
*size = 0;
#ifdef KERN_PROC2
if (!processes)
return NULL;
mib [0] = CTL_KERN;
mib [1] = KERN_PROC2;
mib [2] = KERN_PROC_ALL;
mib [3] = 0;
mib [4] = sizeof(struct kinfo_proc2);
mib [5] = 400; /* XXX */
res = sysctl (mib, 6, processes, &data_len, NULL, 0);
if (res < 0) {
g_free (processes);
return NULL;
}
#else
processes = NULL;
while (limit) {
mib [0] = CTL_KERN;
mib [1] = KERN_PROC;
mib [2] = KERN_PROC_ALL;
mib [3] = 0;
res = sysctl (mib, 3, NULL, &data_len, NULL, 0);
if (res)
return NULL;
processes = (struct kinfo_proc *) g_malloc (data_len);
res = sysctl (mib, 3, processes, &data_len, NULL, 0);
if (res < 0) {
g_free (processes);
if (errno != ENOMEM)
return NULL;
limit --;
} else {
break;
}
}
#endif /* KERN_PROC2 */
#ifdef KERN_PROC2
res = data_len/sizeof (struct kinfo_proc2);
#else
res = data_len/sizeof (struct kinfo_proc);
#endif /* KERN_PROC2 */
buf = (void **) g_realloc (buf, res * sizeof (void*));
for (i = 0; i < res; ++i)
buf [i] = GINT_TO_POINTER (processes [i].kinfo_pid_member);
g_free (processes);
if (size)
*size = res;
return buf;
#elif defined(__HAIKU__)
int32 cookie = 0;
int32 i = 0;
team_info ti;
system_info si;
get_system_info(&si);
void **buf = g_calloc(si.used_teams, sizeof(void*));
while (get_next_team_info(&cookie, &ti) == B_OK && i < si.used_teams) {
buf[i++] = GINT_TO_POINTER (ti.team);
}
*size = i;
return buf;
#elif defined(_AIX)
void **buf = NULL;
struct procentry64 *procs = NULL;
int count = 0;
int i = 0;
pid_t pid = 1; // start at 1, 0 is a null process (???)
// count number of procs + compensate for new ones forked in while we do it.
// (it's not an atomic operation) 1000000 is the limit IBM ps seems to use
// when I inspected it under truss. the second call we do to getprocs64 will
// then only allocate what we need, instead of allocating some obscenely large
// array on the heap.
count = getprocs64(NULL, sizeof (struct procentry64), NULL, 0, &pid, 1000000);
if (count < 1)
goto cleanup;
count += 10;
pid = 1; // reset the pid cookie
// 5026 bytes is the ideal size for the C struct. you may not like it, but
// this is what peak allocation looks like
procs = g_calloc (count, sizeof (struct procentry64));
// the man page recommends you do this in a loop, but you can also just do it
// in one shot; again, like what ps does. let the returned count (in case it's
// less) be what we then allocate the array of pids from (in case of ANOTHER
// system-wide race condition with processes)
count = getprocs64 (procs, sizeof (struct procentry64), NULL, 0, &pid, count);
if (count < 1 || procs == NULL)
goto cleanup;
buf = g_calloc (count, sizeof (void*));
for (i = 0; i < count; i++) {
buf[i] = GINT_TO_POINTER (procs[i].pi_pid);
}
*size = i;
cleanup:
g_free (procs);
return buf;
#else
const char *name;
void **buf = NULL;
int count = 0;
int i = 0;
GDir *dir = g_dir_open ("/proc/", 0, NULL);
if (!dir) {
if (size)
*size = 0;
return NULL;
}
while ((name = g_dir_read_name (dir))) {
int pid;
char *nend;
pid = strtol (name, &nend, 10);
if (pid <= 0 || nend == name || *nend)
continue;
if (i >= count) {
if (!count)
count = 16;
else
count *= 2;
buf = (void **)g_realloc (buf, count * sizeof (void*));
}
buf [i++] = GINT_TO_POINTER (pid);
}
g_dir_close (dir);
if (size)
*size = i;
return buf;
#endif
}
static G_GNUC_UNUSED char*
get_pid_status_item_buf (int pid, const char *item, char *rbuf, int blen, MonoProcessError *error)
{
char buf [256];
char *s;
FILE *f;
size_t len = strlen (item);
g_snprintf (buf, sizeof (buf), "/proc/%d/status", pid);
f = fopen (buf, "r");
if (!f) {
if (error)
*error = MONO_PROCESS_ERROR_NOT_FOUND;
return NULL;
}
while ((s = fgets (buf, sizeof (buf), f))) {
if (*item != *buf)
continue;
if (strncmp (buf, item, len))
continue;
s = buf + len;
while (g_ascii_isspace (*s)) s++;
if (*s++ != ':')
continue;
while (g_ascii_isspace (*s)) s++;
fclose (f);
len = strlen (s);
memcpy (rbuf, s, MIN (len, blen));
rbuf [MIN (len, blen) - 1] = 0;
if (error)
*error = MONO_PROCESS_ERROR_NONE;
return rbuf;
}
fclose (f);
if (error)
*error = MONO_PROCESS_ERROR_OTHER;
return NULL;
}
#if USE_SYSCTL
#ifdef KERN_PROC2
#define KINFO_PROC struct kinfo_proc2
#else
#define KINFO_PROC struct kinfo_proc
#endif
static gboolean
sysctl_kinfo_proc (gpointer pid, KINFO_PROC* processi)
{
int res;
size_t data_len = sizeof (KINFO_PROC);
#ifdef KERN_PROC2
int mib [6];
mib [0] = CTL_KERN;
mib [1] = KERN_PROC2;
mib [2] = KERN_PROC_PID;
mib [3] = GPOINTER_TO_UINT (pid);
mib [4] = sizeof(KINFO_PROC);
mib [5] = 400; /* XXX */
res = sysctl (mib, 6, processi, &data_len, NULL, 0);
#else
int mib [4];
mib [0] = CTL_KERN;
mib [1] = KERN_PROC;
mib [2] = KERN_PROC_PID;
mib [3] = GPOINTER_TO_UINT (pid);
res = sysctl (mib, 4, processi, &data_len, NULL, 0);
#endif /* KERN_PROC2 */
if (res < 0 || data_len != sizeof (KINFO_PROC))
return FALSE;
return TRUE;
}
#endif /* USE_SYSCTL */
/**
* mono_process_get_name:
* \param pid pid of the process
* \param buf byte buffer where to store the name of the process
* \param len size of the buffer \p buf
* \returns the name of the process identified by \p pid, storing it
* inside \p buf for a maximum of len bytes (including the terminating 0).
*/
char*
mono_process_get_name (gpointer pid, char *buf, int len)
{
#if USE_SYSCTL
KINFO_PROC processi;
memset (buf, 0, len);
if (sysctl_kinfo_proc (pid, &processi))
memcpy (buf, processi.kinfo_name_member, len - 1);
return buf;
#elif defined(_AIX)
struct procentry64 proc;
pid_t newpid = GPOINTER_TO_INT (pid);
if (getprocs64 (&proc, sizeof (struct procentry64), NULL, 0, &newpid, 1) == 1) {
g_strlcpy (buf, proc.pi_comm, len - 1);
}
return buf;
#else
char fname [128];
FILE *file;
char *p;
size_t r;
sprintf (fname, "/proc/%d/cmdline", GPOINTER_TO_INT (pid));
buf [0] = 0;
file = fopen (fname, "r");
if (!file)
return buf;
r = fread (buf, 1, len - 1, file);
fclose (file);
buf [r] = 0;
p = strrchr (buf, '/');
if (p)
return p + 1;
if (r == 0) {
return get_pid_status_item_buf (GPOINTER_TO_INT (pid), "Name", buf, len, NULL);
}
return buf;
#endif
}
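/*
 * Minimal usage sketch (illustrative only, not part of the original file). The caller
 * owns the buffer; the returned pointer points into that buffer, so copy it if it must
 * outlive the buffer.
 */
#if 0
static void
print_own_process_name (void)
{
	char name [128];
	const char *n = mono_process_get_name (GINT_TO_POINTER (mono_process_current_pid ()), name, sizeof (name));
	printf ("process name: %s\n", n);
}
#endif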
void
mono_process_get_times (gpointer pid, gint64 *start_time, gint64 *user_time, gint64 *kernel_time)
{
if (user_time)
*user_time = mono_process_get_data (pid, MONO_PROCESS_USER_TIME);
if (kernel_time)
*kernel_time = mono_process_get_data (pid, MONO_PROCESS_SYSTEM_TIME);
if (start_time) {
*start_time = 0;
#if USE_SYSCTL && defined(kinfo_starttime_member)
{
KINFO_PROC processi;
if (sysctl_kinfo_proc (pid, &processi)) {
#if defined(__NetBSD__)
struct timeval tv;
tv.tv_sec = processi.kinfo_starttime_member;
tv.tv_usec = processi.p_ustart_usec;
*start_time = mono_100ns_datetime_from_timeval(tv);
#else
*start_time = mono_100ns_datetime_from_timeval (processi.kinfo_starttime_member);
#endif
}
}
#endif
if (*start_time == 0) {
static guint64 boot_time = 0;
if (!boot_time)
boot_time = mono_100ns_datetime () - mono_msec_boottime () * 10000;
*start_time = boot_time + mono_process_get_data (pid, MONO_PROCESS_ELAPSED);
}
}
}
/*
* /proc/pid/stat format:
* pid (cmdname) S
* [0] ppid pgid sid tty_nr tty_pgrp flags min_flt cmin_flt maj_flt cmaj_flt
* [10] utime stime cutime cstime prio nice threads 0 start_time vsize
* [20] rss rsslim start_code end_code start_stack esp eip pending blocked sigign
* [30] sigcatch wchan 0 0 exit_signal cpu rt_prio policy
*/
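/*
 * Worked example (hypothetical values): given a stat line such as
 *   1234 (mono) S 1 1234 1234 0 -1 4194560 2000 0 3 0 250 40 ...
 * the parser below skips "1234 (mono) S" and then counts fields starting at ppid,
 * so pos 10 selects utime (250 ticks), pos 11 selects stime (40 ticks) and pos 18
 * the process start time; get_process_stat_time () then scales those clock ticks
 * to 100ns units via 10000000 / USER_HZ.
 */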
#define RET_ERROR(err) do { \
if (error) *error = (err); \
return 0; \
} while (0)
static gint64
get_process_stat_item (int pid, int pos, int sum, MonoProcessError *error)
{
#if defined(__APPLE__)
double process_user_time = 0, process_system_time = 0;//, process_percent = 0;
task_t task;
struct task_basic_info t_info;
mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT, th_count;
thread_array_t th_array;
size_t i;
kern_return_t ret;
if (pid == getpid ()) {
/* task_for_pid () doesn't work on ios, even for the current process */
task = mach_task_self ();
} else {
do {
ret = task_for_pid (mach_task_self (), pid, &task);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
}
do {
ret = task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
do {
ret = task_threads (task, &th_array, &th_count);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
for (i = 0; i < th_count; i++) {
double thread_user_time, thread_system_time;//, thread_percent;
struct thread_basic_info th_info;
mach_msg_type_number_t th_info_count = THREAD_BASIC_INFO_COUNT;
do {
ret = thread_info(th_array[i], THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count);
} while (ret == KERN_ABORTED);
if (ret == KERN_SUCCESS) {
thread_user_time = th_info.user_time.seconds + th_info.user_time.microseconds / 1e6;
thread_system_time = th_info.system_time.seconds + th_info.system_time.microseconds / 1e6;
//thread_percent = (double)th_info.cpu_usage / TH_USAGE_SCALE;
process_user_time += thread_user_time;
process_system_time += thread_system_time;
//process_percent += th_percent;
}
}
for (i = 0; i < th_count; i++)
mach_port_deallocate(task, th_array[i]);
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
process_user_time += t_info.user_time.seconds + t_info.user_time.microseconds / 1e6;
process_system_time += t_info.system_time.seconds + t_info.system_time.microseconds / 1e6;
if (pos == 10 && sum == TRUE)
return (gint64)((process_user_time + process_system_time) * 10000000);
else if (pos == 10)
return (gint64)(process_user_time * 10000000);
else if (pos == 11)
return (gint64)(process_system_time * 10000000);
return 0;
#else
char buf [512];
char *s, *end;
FILE *f;
size_t len;
int i;
gint64 value;
g_snprintf (buf, sizeof (buf), "/proc/%d/stat", pid);
f = fopen (buf, "r");
if (!f)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
len = fread (buf, 1, sizeof (buf), f);
fclose (f);
if (len <= 0)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
s = strchr (buf, ')');
if (!s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
s++;
while (g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
/* skip the status char */
while (*s && !g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
for (i = 0; i < pos; ++i) {
while (g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
while (*s && !g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
/* we are finally at the needed item */
value = strtoul (s, &end, 0);
/* add also the following value */
if (sum) {
/* advance past the value just parsed so we add the next field rather than counting the same one twice */
s = end;
while (g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
value += strtoul (s, &end, 0);
}
if (error)
*error = MONO_PROCESS_ERROR_NONE;
return value;
#endif
}
static int
get_user_hz (void)
{
static int user_hz = 0;
if (user_hz == 0) {
#if defined (_SC_CLK_TCK) && defined (HAVE_SYSCONF)
user_hz = sysconf (_SC_CLK_TCK);
#endif
if (user_hz == 0)
user_hz = 100;
}
return user_hz;
}
static gint64
get_process_stat_time (int pid, int pos, int sum, MonoProcessError *error)
{
gint64 val = get_process_stat_item (pid, pos, sum, error);
#if defined(__APPLE__)
return val;
#else
/* return 100ns ticks */
return (val * 10000000) / get_user_hz ();
#endif
}
static gint64
get_pid_status_item (int pid, const char *item, MonoProcessError *error, int multiplier)
{
#if defined(__APPLE__)
// ignore the multiplier
gint64 ret;
task_t task;
task_vm_info_data_t t_info;
mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
kern_return_t mach_ret;
if (pid == getpid ()) {
/* task_for_pid () doesn't work on ios, even for the current process */
task = mach_task_self ();
} else {
do {
mach_ret = task_for_pid (mach_task_self (), pid, &task);
} while (mach_ret == KERN_ABORTED);
if (mach_ret != KERN_SUCCESS)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
}
do {
mach_ret = task_info (task, TASK_VM_INFO, (task_info_t)&t_info, &info_count);
} while (mach_ret == KERN_ABORTED);
if (mach_ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
if(strcmp (item, "VmData") == 0)
ret = t_info.internal + t_info.compressed;
else if (strcmp (item, "VmRSS") == 0)
ret = t_info.resident_size;
else if(strcmp (item, "VmHWM") == 0)
ret = t_info.resident_size_peak;
else if (strcmp (item, "VmSize") == 0 || strcmp (item, "VmPeak") == 0)
ret = t_info.virtual_size;
else if (strcmp (item, "Threads") == 0) {
struct task_basic_info t_info;
mach_msg_type_number_t th_count = TASK_BASIC_INFO_COUNT;
do {
mach_ret = task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &th_count);
} while (mach_ret == KERN_ABORTED);
if (mach_ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
ret = th_count;
} else if (strcmp (item, "VmSwap") == 0)
ret = t_info.compressed;
else
ret = 0;
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
return ret;
#else
char buf [64];
char *s;
s = get_pid_status_item_buf (pid, item, buf, sizeof (buf), error);
if (s)
return ((gint64) atol (s)) * multiplier;
return 0;
#endif
}
/**
* mono_process_get_data:
* \param pid pid of the process
* \param data description of data to return
* \returns a data item of a process like user time, memory use etc,
* according to the \p data argument.
*/
gint64
mono_process_get_data_with_error (gpointer pid, MonoProcessData data, MonoProcessError *error)
{
gint64 val;
int rpid = GPOINTER_TO_INT (pid);
if (error)
*error = MONO_PROCESS_ERROR_OTHER;
switch (data) {
case MONO_PROCESS_NUM_THREADS:
return get_pid_status_item (rpid, "Threads", error, 1);
case MONO_PROCESS_USER_TIME:
return get_process_stat_time (rpid, 10, FALSE, error);
case MONO_PROCESS_SYSTEM_TIME:
return get_process_stat_time (rpid, 11, FALSE, error);
case MONO_PROCESS_TOTAL_TIME:
return get_process_stat_time (rpid, 10, TRUE, error);
case MONO_PROCESS_WORKING_SET:
return get_pid_status_item (rpid, "VmRSS", error, 1024);
case MONO_PROCESS_WORKING_SET_PEAK:
val = get_pid_status_item (rpid, "VmHWM", error, 1024);
if (val == 0)
val = get_pid_status_item (rpid, "VmRSS", error, 1024);
return val;
case MONO_PROCESS_PRIVATE_BYTES:
return get_pid_status_item (rpid, "VmData", error, 1024);
case MONO_PROCESS_VIRTUAL_BYTES:
return get_pid_status_item (rpid, "VmSize", error, 1024);
case MONO_PROCESS_VIRTUAL_BYTES_PEAK:
val = get_pid_status_item (rpid, "VmPeak", error, 1024);
if (val == 0)
val = get_pid_status_item (rpid, "VmSize", error, 1024);
return val;
case MONO_PROCESS_FAULTS:
return get_process_stat_item (rpid, 6, TRUE, error);
case MONO_PROCESS_ELAPSED:
return get_process_stat_time (rpid, 18, FALSE, error);
case MONO_PROCESS_PPID:
return get_process_stat_time (rpid, 0, FALSE, error);
case MONO_PROCESS_PAGED_BYTES:
return get_pid_status_item (rpid, "VmSwap", error, 1024);
/* Nothing yet */
case MONO_PROCESS_END:
return 0;
}
return 0;
}
gint64
mono_process_get_data (gpointer pid, MonoProcessData data)
{
MonoProcessError error;
return mono_process_get_data_with_error (pid, data, &error);
}
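/*
 * Minimal usage sketch (illustrative only, not part of the original file): memory
 * counters are returned in bytes, time counters in 100ns ticks.
 */
#if 0
static void
print_own_usage (void)
{
	gpointer pid = GINT_TO_POINTER (mono_process_current_pid ());
	gint64 rss = mono_process_get_data (pid, MONO_PROCESS_WORKING_SET);
	gint64 user = mono_process_get_data (pid, MONO_PROCESS_USER_TIME);
	printf ("rss: %lld bytes, user time: %lld x 100ns\n", (long long) rss, (long long) user);
}
#endif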
#ifndef HOST_WIN32
int
mono_process_current_pid ()
{
#if defined(HAVE_GETPID)
return (int) getpid ();
#elif defined(HOST_WASI)
return 0;
#else
#error getpid
#endif
}
#endif /* !HOST_WIN32 */
/**
* mono_cpu_count:
* \returns the number of processors on the system.
*/
#ifndef HOST_WIN32
int
mono_cpu_count (void)
{
#ifdef HOST_ANDROID
/* Android tries really hard to save power by powering off CPUs on SMP phones which
* means the normal way to query cpu count returns a wrong value with userspace API.
* Instead we use /sys entries to query the actual hardware CPU count.
*/
int count = 0;
char buffer[8] = {'\0'};
int present = open ("/sys/devices/system/cpu/present", O_RDONLY);
/* Format of the /sys entry is a cpulist of indexes which in the case
* of present is always of the form "0-(n-1)" when there is more than
* 1 core, n being the number of CPU cores in the system. Otherwise
* the value is simply 0
*/
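/* Worked example: an 8-core device typically exposes "0-7\n" (4 bytes), so
 * strtol (buffer + 2, ...) parses 7 and the code below reports 7 + 1 = 8 cores;
 * a single-core device exposes just "0\n" (2 bytes), which fails the > 3 check,
 * so this sysfs path is skipped.
 */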
if (present != -1 && read (present, (char*)buffer, sizeof (buffer)) > 3)
count = strtol (((char*)buffer) + 2, NULL, 10);
if (present != -1)
close (present);
if (count > 0)
return count + 1;
#endif
#if defined(HOST_ARM) || defined (HOST_ARM64)
/*
* Recap from Alexander Köplinger <[email protected]>:
*
* When we merged the change from PR #2722, we started seeing random failures on ARM in
* the MonoTests.System.Threading.ThreadPoolTests.SetAndGetMaxThreads and
* MonoTests.System.Threading.ManualResetEventSlimTests.Constructor_Defaults tests. Both
* of those tests are dealing with Environment.ProcessorCount to verify some implementation
* details.
*
* It turns out that on the Jetson TK1 board we use on public Jenkins and on ARM kernels
* in general, the value returned by sched_getaffinity (or _SC_NPROCESSORS_ONLN) doesn't
* contain CPUs/cores that are powered off for power saving reasons. This is contrary to
* what happens on x86, where even cores in deep-sleep state are returned [1], [2]. This
* means that we would get a processor count of 1 at one point in time and a higher value
* when load increases later on as the system wakes CPUs.
*
* Various runtime pieces like the threadpool and also user code however relies on the
* value returned by Environment.ProcessorCount e.g. for deciding how many parallel tasks
* to start, thereby limiting the performance when that code thinks we only have one CPU.
*
* Talking to a few people, this was the reason why we changed to _SC_NPROCESSORS_CONF in
* mono#1688 and why we added a special case for Android in mono@de3addc to get the "real"
* number of processors in the system.
*
* Because of those issues Android/Dalvik also switched from _ONLN to _SC_NPROCESSORS_CONF
* for the Java API Runtime.availableProcessors() too [3], citing:
* > Traditionally this returned the number currently online, but many mobile devices are
* able to take unused cores offline to save power, so releases newer than Android 4.2 (Jelly
* Bean) return the maximum number of cores that could be made available if there were no
* power or heat constraints.
*
* The problem with sticking to _SC_NPROCESSORS_CONF however is that it breaks down in
* constrained environments like Docker or with an explicit CPU affinity set by the Linux
* `taskset` command, They'd get a higher CPU count than can be used, start more threads etc.
* which results in unnecessary context switches and overloaded systems. That's why we need
* to respect sched_getaffinity.
*
* So while in an ideal world we would be able to rely on sched_getaffinity/_SC_NPROCESSORS_ONLN
* to return the number of theoretically available CPUs regardless of power saving measures
* everywhere, we can't do this on ARM.
*
* I think the pragmatic solution is the following:
* * use sched_getaffinity (+ fallback to _SC_NPROCESSORS_ONLN in case of error) on x86. This
* ensures we're inline with what OpenJDK [4] and CoreCLR [5] do
* * use _SC_NPROCESSORS_CONF exclusively on ARM (I think we could eventually even get rid of
* the HOST_ANDROID special case)
*
* Helpful links:
*
* [1] https://sourceware.org/ml/libc-alpha/2013-07/msg00383.html
* [2] https://lists.01.org/pipermail/powertop/2012-September/000433.html
* [3] https://android.googlesource.com/platform/libcore/+/750dc634e56c58d1d04f6a138734ac2b772900b5%5E1..750dc634e56c58d1d04f6a138734ac2b772900b5/
* [4] https://bugs.openjdk.java.net/browse/JDK-6515172
* [5] https://github.com/dotnet/coreclr/blob/7058273693db2555f127ce16e6b0c5b40fb04867/src/pal/src/misc/sysinfo.cpp#L148
*/
#if defined (_SC_NPROCESSORS_CONF) && defined (HAVE_SYSCONF)
{
int count = sysconf (_SC_NPROCESSORS_CONF);
if (count > 0)
return count;
}
#endif
#else
#ifdef HAVE_SCHED_GETAFFINITY
{
cpu_set_t set;
if (sched_getaffinity (mono_process_current_pid (), sizeof (set), &set) == 0)
return CPU_COUNT (&set);
}
#endif
#if defined (_SC_NPROCESSORS_ONLN) && defined (HAVE_SYSCONF)
{
int count = sysconf (_SC_NPROCESSORS_ONLN);
if (count > 0)
return count;
}
#endif
#endif /* defined(HOST_ARM) || defined (HOST_ARM64) */
#ifdef USE_SYSCTL
{
int count;
int mib [2];
size_t len = sizeof (int);
mib [0] = CTL_HW;
mib [1] = HW_NCPU;
if (sysctl (mib, 2, &count, &len, NULL, 0) == 0)
return count;
}
#endif
/* FIXME: warn */
return 1;
}
#endif /* !HOST_WIN32 */
static void
get_cpu_times (int cpu_id, gint64 *user, gint64 *systemt, gint64 *irq, gint64 *sirq, gint64 *idle)
{
char buf [256];
char *s;
int uhz = get_user_hz ();
guint64 user_ticks = 0, nice_ticks = 0, system_ticks = 0, idle_ticks = 0, irq_ticks = 0, sirq_ticks = 0;
FILE *f = fopen ("/proc/stat", "r");
if (!f)
return;
if (cpu_id < 0)
uhz *= mono_cpu_count ();
while ((s = fgets (buf, sizeof (buf), f))) {
char *data = NULL;
if (cpu_id < 0 && strncmp (s, "cpu", 3) == 0 && g_ascii_isspace (s [3])) {
data = s + 4;
} else if (cpu_id >= 0 && strncmp (s, "cpu", 3) == 0 && strtol (s + 3, &data, 10) == cpu_id) {
if (data == s + 3)
continue;
data++;
} else {
continue;
}
user_ticks = strtoull (data, &data, 10);
nice_ticks = strtoull (data, &data, 10);
system_ticks = strtoull (data, &data, 10);
idle_ticks = strtoull (data, &data, 10);
/* iowait_ticks = strtoull (data, &data, 10); */
irq_ticks = strtoull (data, &data, 10);
sirq_ticks = strtoull (data, &data, 10);
break;
}
fclose (f);
if (user)
*user = (user_ticks + nice_ticks) * 10000000 / uhz;
if (systemt)
*systemt = (system_ticks) * 10000000 / uhz;
if (irq)
*irq = (irq_ticks) * 10000000 / uhz;
if (sirq)
*sirq = (sirq_ticks) * 10000000 / uhz;
if (idle)
*idle = (idle_ticks) * 10000000 / uhz;
}
/**
* mono_cpu_get_data:
* \param cpu_id processor number or -1 to get a summary of all the processors
* \param data type of data to retrieve
* Get data about a processor on the system, like time spent in user space or idle time.
*/
gint64
mono_cpu_get_data (int cpu_id, MonoCpuData data, MonoProcessError *error)
{
gint64 value = 0;
if (error)
*error = MONO_PROCESS_ERROR_NONE;
switch (data) {
case MONO_CPU_USER_TIME:
get_cpu_times (cpu_id, &value, NULL, NULL, NULL, NULL);
break;
case MONO_CPU_PRIV_TIME:
get_cpu_times (cpu_id, NULL, &value, NULL, NULL, NULL);
break;
case MONO_CPU_INTR_TIME:
get_cpu_times (cpu_id, NULL, NULL, &value, NULL, NULL);
break;
case MONO_CPU_DCP_TIME:
get_cpu_times (cpu_id, NULL, NULL, NULL, &value, NULL);
break;
case MONO_CPU_IDLE_TIME:
get_cpu_times (cpu_id, NULL, NULL, NULL, NULL, &value);
break;
case MONO_CPU_END:
/* Nothing yet */
return 0;
}
return value;
}
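/*
 * Minimal usage sketch (illustrative only, not part of the original file): the values
 * come from /proc/stat and are cumulative since boot (in 100ns units), so callers
 * typically sample twice and take the difference.
 */
#if 0
static gint64
total_idle_time_all_cpus (void)
{
	MonoProcessError err;
	return mono_cpu_get_data (-1, MONO_CPU_IDLE_TIME, &err);
}
#endif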
int
mono_atexit (void (*func)(void))
{
#if defined(HOST_ANDROID) || !defined(HAVE_ATEXIT)
/* Some versions of android libc doesn't define atexit () */
return 0;
#else
return atexit (func);
#endif
}
#ifndef HOST_WIN32
gboolean
mono_pe_file_time_date_stamp (const gunichar2 *filename, guint32 *out)
{
void *map_handle;
guint32 map_size;
gpointer file_map = mono_pe_file_map (filename, &map_size, &map_handle);
if (!file_map)
return FALSE;
/* Figure this out when we support 64bit PE files */
if (1) {
IMAGE_DOS_HEADER *dos_header = (IMAGE_DOS_HEADER *)file_map;
if (dos_header->e_magic != IMAGE_DOS_SIGNATURE) {
mono_pe_file_unmap (file_map, map_handle);
return FALSE;
}
IMAGE_NT_HEADERS32 *nt_headers = (IMAGE_NT_HEADERS32 *)((guint8 *)file_map + GUINT32_FROM_LE (dos_header->e_lfanew));
if (nt_headers->Signature != IMAGE_NT_SIGNATURE) {
mono_pe_file_unmap (file_map, map_handle);
return FALSE;
}
*out = nt_headers->FileHeader.TimeDateStamp;
} else {
g_assert_not_reached ();
}
mono_pe_file_unmap (file_map, map_handle);
return TRUE;
}
gpointer
mono_pe_file_map (const gunichar2 *filename, guint32 *map_size, void **handle)
{
gchar *filename_ext = NULL;
gchar *located_filename = NULL;
guint64 fsize = 0;
gpointer file_map = NULL;
ERROR_DECL (error);
MonoFileMap *filed = NULL;
/* According to the MSDN docs, a search path is applied to
* filename. FIXME: implement this, for now just pass it
* straight to open
*/
filename_ext = mono_unicode_to_external_checked (filename, error);
// This block was added to diagnose https://github.com/mono/mono/issues/14730, remove after resolved
if (G_UNLIKELY (filename_ext == NULL)) {
GString *raw_bytes = g_string_new (NULL);
const gunichar2 *p = filename;
while (*p)
g_string_append_printf (raw_bytes, "%04X ", *p++);
g_assertf (filename_ext != NULL, "%s: unicode conversion returned NULL; %s; input was: %s", __func__, mono_error_get_message (error), raw_bytes->str);
g_string_free (raw_bytes, TRUE);
}
if (filename_ext == NULL) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: unicode conversion returned NULL; %s", __func__, mono_error_get_message (error));
mono_error_cleanup (error);
goto exit;
}
if ((filed = mono_file_map_open (filename_ext)) == NULL) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: Error opening file %s (3): %s", __func__, filename_ext, strerror (errno));
goto exit;
}
fsize = mono_file_map_size (filed);
if (fsize == 0) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: Error stat()ing file %s: %s", __func__, filename_ext, strerror (errno));
goto exit;
}
g_assert (fsize <= G_MAXUINT32);
*map_size = fsize;
/* Check basic file size */
if (fsize < sizeof(IMAGE_DOS_HEADER)) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: File %s is too small: %" PRId64, __func__, filename_ext, fsize);
goto exit;
}
file_map = mono_file_map (fsize, MONO_MMAP_READ | MONO_MMAP_PRIVATE, mono_file_map_fd (filed), 0, handle);
if (file_map == NULL) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: Error mmap()ing file %s: %s", __func__, filename_ext, strerror (errno));
goto exit;
}
exit:
if (filed)
mono_file_map_close (filed);
g_free (located_filename);
g_free (filename_ext);
return file_map;
}
void
mono_pe_file_unmap (gpointer file_map, void *handle)
{
gint res;
res = mono_file_unmap (file_map, handle);
if (G_UNLIKELY (res != 0))
g_error ("%s: mono_file_unmap failed, error: \"%s\" (%d)", __func__, g_strerror (errno), errno);
}
#endif /* HOST_WIN32 */
/*
* This function returns the cpu usage in percentage,
* normalized on the number of cores.
*
* Warning : the percentage returned can be > 100%. This
* might happens on systems like Android which, for
* battery and performance reasons, shut down cores and
* lie about the number of active cores.
*/
#ifndef HOST_WIN32
gint32
mono_cpu_usage (MonoCpuUsageState *prev)
{
gint32 cpu_usage = 0;
#ifdef HAVE_GETRUSAGE
gint64 cpu_total_time;
gint64 cpu_busy_time;
struct rusage resource_usage;
gint64 current_time;
gint64 kernel_time;
gint64 user_time;
if (getrusage (RUSAGE_SELF, &resource_usage) == -1) {
g_error ("getrusage() failed, errno is %d (%s)\n", errno, strerror (errno));
return -1;
}
current_time = mono_100ns_ticks ();
kernel_time = resource_usage.ru_stime.tv_sec * 1000 * 1000 * 10 + resource_usage.ru_stime.tv_usec * 10;
user_time = resource_usage.ru_utime.tv_sec * 1000 * 1000 * 10 + resource_usage.ru_utime.tv_usec * 10;
cpu_busy_time = (user_time - (prev ? prev->user_time : 0)) + (kernel_time - (prev ? prev->kernel_time : 0));
cpu_total_time = (current_time - (prev ? prev->current_time : 0)) * mono_cpu_count ();
if (prev) {
prev->kernel_time = kernel_time;
prev->user_time = user_time;
prev->current_time = current_time;
}
if (cpu_total_time > 0 && cpu_busy_time > 0)
cpu_usage = (gint32)(cpu_busy_time * 100 / cpu_total_time);
#endif
return cpu_usage;
}
#endif /* !HOST_WIN32 */
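/*
 * Minimal usage sketch (illustrative only, not part of the original file): the state
 * struct carries the previous sample, so the second call reports this process' usage
 * over the interval between the two calls.
 */
#if 0
static gint32
sample_cpu_usage_over_one_second (void)
{
	MonoCpuUsageState state = { 0 };
	mono_cpu_usage (&state);        /* prime the baseline */
	g_usleep (1000 * 1000);         /* ... run the workload to be measured ... */
	return mono_cpu_usage (&state); /* percentage, normalized on the core count */
}
#endif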
| /**
* \file
* Copyright 2008-2011 Novell Inc
* Copyright 2011 Xamarin Inc
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "config.h"
#include "utils/mono-proclib.h"
#include "utils/mono-time.h"
#include "utils/mono-errno.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SCHED_GETAFFINITY
#include <sched.h>
#endif
#include <utils/mono-mmap.h>
#include <utils/strenc-internals.h>
#include <utils/strenc.h>
#include <utils/mono-error-internals.h>
#include <utils/mono-logger-internals.h>
#if defined(_POSIX_VERSION)
#ifdef HAVE_SYS_ERRNO_H
#include <sys/errno.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#include <errno.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#endif
#if defined(__HAIKU__)
#include <os/kernel/OS.h>
#endif
#if defined(_AIX)
#include <procinfo.h>
#endif
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
#include <sys/proc.h>
#if defined(__APPLE__)
#include <mach/mach.h>
#endif
#ifdef HAVE_SYS_USER_H
#include <sys/user.h>
#endif
#ifdef HAVE_STRUCT_KINFO_PROC_KP_PROC
# define kinfo_starttime_member kp_proc.p_starttime
# define kinfo_pid_member kp_proc.p_pid
# define kinfo_name_member kp_proc.p_comm
#elif defined(__NetBSD__)
# define kinfo_starttime_member p_ustart_sec
# define kinfo_pid_member p_pid
# define kinfo_name_member p_comm
#elif defined(__OpenBSD__)
// Can not figure out how to get the proc's start time on OpenBSD
# undef kinfo_starttime_member
# define kinfo_pid_member p_pid
# define kinfo_name_member p_comm
#else
#define kinfo_starttime_member ki_start
#define kinfo_pid_member ki_pid
#define kinfo_name_member ki_comm
#endif
#define USE_SYSCTL 1
#endif
#ifdef HAVE_SCHED_GETAFFINITY
# ifndef HAVE_GNU_CPU_COUNT
static int
CPU_COUNT(cpu_set_t *set)
{
int count = 0;
for (int i = 0; i < CPU_SETSIZE; i++)
if (CPU_ISSET(i, set))
count++;
return count;
}
# endif
#endif
/**
* mono_process_list:
* \param size a pointer to a location where the size of the returned array is stored
* \returns an array of pid values for the processes currently running on the system.
* The size of the array is stored in \p size.
*/
gpointer*
mono_process_list (int *size)
{
#if USE_SYSCTL
int res, i;
#ifdef KERN_PROC2
int mib [6];
size_t data_len = sizeof (struct kinfo_proc2) * 400;
struct kinfo_proc2 *processes = g_malloc (data_len);
#else
int mib [4];
size_t data_len = sizeof (struct kinfo_proc) * 16;
struct kinfo_proc *processes;
int limit = 8;
#endif /* KERN_PROC2 */
void **buf = NULL;
if (size)
*size = 0;
#ifdef KERN_PROC2
if (!processes)
return NULL;
mib [0] = CTL_KERN;
mib [1] = KERN_PROC2;
mib [2] = KERN_PROC_ALL;
mib [3] = 0;
mib [4] = sizeof(struct kinfo_proc2);
mib [5] = 400; /* XXX */
res = sysctl (mib, 6, processes, &data_len, NULL, 0);
if (res < 0) {
g_free (processes);
return NULL;
}
#else
processes = NULL;
while (limit) {
mib [0] = CTL_KERN;
mib [1] = KERN_PROC;
mib [2] = KERN_PROC_ALL;
mib [3] = 0;
res = sysctl (mib, 3, NULL, &data_len, NULL, 0);
if (res)
return NULL;
processes = (struct kinfo_proc *) g_malloc (data_len);
res = sysctl (mib, 3, processes, &data_len, NULL, 0);
if (res < 0) {
g_free (processes);
if (errno != ENOMEM)
return NULL;
limit --;
} else {
break;
}
}
#endif /* KERN_PROC2 */
#ifdef KERN_PROC2
res = data_len/sizeof (struct kinfo_proc2);
#else
res = data_len/sizeof (struct kinfo_proc);
#endif /* KERN_PROC2 */
buf = (void **) g_realloc (buf, res * sizeof (void*));
for (i = 0; i < res; ++i)
buf [i] = GINT_TO_POINTER (processes [i].kinfo_pid_member);
g_free (processes);
if (size)
*size = res;
return buf;
#elif defined(__HAIKU__)
int32 cookie = 0;
int32 i = 0;
team_info ti;
system_info si;
get_system_info(&si);
void **buf = g_calloc(si.used_teams, sizeof(void*));
while (get_next_team_info(&cookie, &ti) == B_OK && i < si.used_teams) {
buf[i++] = GINT_TO_POINTER (ti.team);
}
*size = i;
return buf;
#elif defined(_AIX)
void **buf = NULL;
struct procentry64 *procs = NULL;
int count = 0;
int i = 0;
pid_t pid = 1; // start at 1, 0 is a null process (???)
// count number of procs + compensate for new ones forked in while we do it.
// (it's not an atomic operation) 1000000 is the limit IBM ps seems to use
// when I inspected it under truss. the second call we do to getprocs64 will
// then only allocate what we need, instead of allocating some obscenely large
// array on the heap.
count = getprocs64(NULL, sizeof (struct procentry64), NULL, 0, &pid, 1000000);
if (count < 1)
goto cleanup;
count += 10;
pid = 1; // reset the pid cookie
// 5026 bytes is the ideal size for the C struct. you may not like it, but
// this is what peak allocation looks like
procs = g_calloc (count, sizeof (struct procentry64));
// the man page recommends you do this in a loop, but you can also just do it
// in one shot; again, like what ps does. let the returned count (in case it's
// less) be what we then allocate the array of pids from (in case of ANOTHER
// system-wide race condition with processes)
count = getprocs64 (procs, sizeof (struct procentry64), NULL, 0, &pid, count);
if (count < 1 || procs == NULL)
goto cleanup;
buf = g_calloc (count, sizeof (void*));
for (i = 0; i < count; i++) {
buf[i] = GINT_TO_POINTER (procs[i].pi_pid);
}
*size = i;
cleanup:
g_free (procs);
return buf;
#else
const char *name;
void **buf = NULL;
int count = 0;
int i = 0;
GDir *dir = g_dir_open ("/proc/", 0, NULL);
if (!dir) {
if (size)
*size = 0;
return NULL;
}
while ((name = g_dir_read_name (dir))) {
int pid;
char *nend;
pid = strtol (name, &nend, 10);
if (pid <= 0 || nend == name || *nend)
continue;
if (i >= count) {
if (!count)
count = 16;
else
count *= 2;
buf = (void **)g_realloc (buf, count * sizeof (void*));
}
buf [i++] = GINT_TO_POINTER (pid);
}
g_dir_close (dir);
if (size)
*size = i;
return buf;
#endif
}
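/*
 * Minimal usage sketch for mono_process_list () and mono_process_get_name ():
 * enumerate the running processes and print "pid name" pairs. Illustrative
 * only; it assumes the declarations from mono-proclib.h are in scope and that
 * the returned array is owned by the caller and released with g_free ().
 */
static G_GNUC_UNUSED void
example_dump_process_names (void)
{
	int i, count = 0;
	char name [256];
	gpointer *pids = mono_process_list (&count);
	for (i = 0; i < count; ++i)
		g_print ("%d %s\n", GPOINTER_TO_INT (pids [i]), mono_process_get_name (pids [i], name, sizeof (name)));
	g_free (pids);
}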
static G_GNUC_UNUSED char*
get_pid_status_item_buf (int pid, const char *item, char *rbuf, int blen, MonoProcessError *error)
{
char buf [256];
char *s;
FILE *f;
size_t len = strlen (item);
g_snprintf (buf, sizeof (buf), "/proc/%d/status", pid);
f = fopen (buf, "r");
if (!f) {
if (error)
*error = MONO_PROCESS_ERROR_NOT_FOUND;
return NULL;
}
while ((s = fgets (buf, sizeof (buf), f))) {
if (*item != *buf)
continue;
if (strncmp (buf, item, len))
continue;
s = buf + len;
while (g_ascii_isspace (*s)) s++;
if (*s++ != ':')
continue;
while (g_ascii_isspace (*s)) s++;
fclose (f);
len = strlen (s);
memcpy (rbuf, s, MIN (len, blen));
rbuf [MIN (len, blen) - 1] = 0;
if (error)
*error = MONO_PROCESS_ERROR_NONE;
return rbuf;
}
fclose (f);
if (error)
*error = MONO_PROCESS_ERROR_OTHER;
return NULL;
}
#if USE_SYSCTL
#ifdef KERN_PROC2
#define KINFO_PROC struct kinfo_proc2
#else
#define KINFO_PROC struct kinfo_proc
#endif
static gboolean
sysctl_kinfo_proc (gpointer pid, KINFO_PROC* processi)
{
int res;
size_t data_len = sizeof (KINFO_PROC);
#ifdef KERN_PROC2
int mib [6];
mib [0] = CTL_KERN;
mib [1] = KERN_PROC2;
mib [2] = KERN_PROC_PID;
mib [3] = GPOINTER_TO_UINT (pid);
mib [4] = sizeof(KINFO_PROC);
mib [5] = 400; /* XXX */
res = sysctl (mib, 6, processi, &data_len, NULL, 0);
#else
int mib [4];
mib [0] = CTL_KERN;
mib [1] = KERN_PROC;
mib [2] = KERN_PROC_PID;
mib [3] = GPOINTER_TO_UINT (pid);
res = sysctl (mib, 4, processi, &data_len, NULL, 0);
#endif /* KERN_PROC2 */
if (res < 0 || data_len != sizeof (KINFO_PROC))
return FALSE;
return TRUE;
}
#endif /* USE_SYSCTL */
/**
* mono_process_get_name:
* \param pid pid of the process
* \param buf byte buffer where to store the name of the process
* \param len size of the buffer \p buf
* \returns the name of the process identified by \p pid, storing it
* inside \p buf for a maximum of len bytes (including the terminating 0).
*/
char*
mono_process_get_name (gpointer pid, char *buf, int len)
{
#if USE_SYSCTL
KINFO_PROC processi;
memset (buf, 0, len);
if (sysctl_kinfo_proc (pid, &processi))
memcpy (buf, processi.kinfo_name_member, len - 1);
return buf;
#elif defined(_AIX)
struct procentry64 proc;
pid_t newpid = GPOINTER_TO_INT (pid);
if (getprocs64 (&proc, sizeof (struct procentry64), NULL, 0, &newpid, 1) == 1) {
g_strlcpy (buf, proc.pi_comm, len - 1);
}
return buf;
#else
char fname [128];
FILE *file;
char *p;
size_t r;
sprintf (fname, "/proc/%d/cmdline", GPOINTER_TO_INT (pid));
buf [0] = 0;
file = fopen (fname, "r");
if (!file)
return buf;
r = fread (buf, 1, len - 1, file);
fclose (file);
buf [r] = 0;
p = strrchr (buf, '/');
if (p)
return p + 1;
if (r == 0) {
return get_pid_status_item_buf (GPOINTER_TO_INT (pid), "Name", buf, len, NULL);
}
return buf;
#endif
}
void
mono_process_get_times (gpointer pid, gint64 *start_time, gint64 *user_time, gint64 *kernel_time)
{
if (user_time)
*user_time = mono_process_get_data (pid, MONO_PROCESS_USER_TIME);
if (kernel_time)
*kernel_time = mono_process_get_data (pid, MONO_PROCESS_SYSTEM_TIME);
if (start_time) {
*start_time = 0;
#if USE_SYSCTL && defined(kinfo_starttime_member)
{
KINFO_PROC processi;
if (sysctl_kinfo_proc (pid, &processi)) {
#if defined(__NetBSD__)
struct timeval tv;
tv.tv_sec = processi.kinfo_starttime_member;
tv.tv_usec = processi.p_ustart_usec;
*start_time = mono_100ns_datetime_from_timeval(tv);
#else
*start_time = mono_100ns_datetime_from_timeval (processi.kinfo_starttime_member);
#endif
}
}
#endif
if (*start_time == 0) {
static guint64 boot_time = 0;
if (!boot_time)
boot_time = mono_100ns_datetime () - mono_msec_boottime () * 10000;
*start_time = boot_time + mono_process_get_data (pid, MONO_PROCESS_ELAPSED);
}
}
}
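/*
 * Minimal usage sketch for mono_process_get_times (): fetch the start time and
 * the user/kernel CPU times of a process, all in 100ns units. Illustrative
 * only; assumes the declarations from mono-proclib.h are in scope and that
 * `pid` is a caller-supplied value such as one returned by mono_process_list ().
 */
static G_GNUC_UNUSED void
example_print_process_times (gpointer pid)
{
	gint64 start_time = 0, user_time = 0, kernel_time = 0;
	mono_process_get_times (pid, &start_time, &user_time, &kernel_time);
	g_print ("start=%" PRId64 " user=%" PRId64 " kernel=%" PRId64 "\n", start_time, user_time, kernel_time);
}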
/*
* /proc/pid/stat format:
* pid (cmdname) S
* [0] ppid pgid sid tty_nr tty_pgrp flags min_flt cmin_flt maj_flt cmaj_flt
* [10] utime stime cutime cstime prio nice threads 0 start_time vsize
* [20] rss rsslim start_code end_code start_stack esp eip pending blocked sigign
* [30] sigcatch wchan 0 0 exit_signal cpu rt_prio policy
*/
#define RET_ERROR(err) do { \
if (error) *error = (err); \
return 0; \
} while (0)
static gint64
get_process_stat_item (int pid, int pos, int sum, MonoProcessError *error)
{
#if defined(__APPLE__)
double process_user_time = 0, process_system_time = 0;//, process_percent = 0;
task_t task;
struct task_basic_info t_info;
mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT, th_count;
thread_array_t th_array;
size_t i;
kern_return_t ret;
if (pid == getpid ()) {
/* task_for_pid () doesn't work on ios, even for the current process */
task = mach_task_self ();
} else {
do {
ret = task_for_pid (mach_task_self (), pid, &task);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
}
do {
ret = task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
do {
ret = task_threads (task, &th_array, &th_count);
} while (ret == KERN_ABORTED);
if (ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
for (i = 0; i < th_count; i++) {
double thread_user_time, thread_system_time;//, thread_percent;
struct thread_basic_info th_info;
mach_msg_type_number_t th_info_count = THREAD_BASIC_INFO_COUNT;
do {
ret = thread_info(th_array[i], THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count);
} while (ret == KERN_ABORTED);
if (ret == KERN_SUCCESS) {
thread_user_time = th_info.user_time.seconds + th_info.user_time.microseconds / 1e6;
thread_system_time = th_info.system_time.seconds + th_info.system_time.microseconds / 1e6;
//thread_percent = (double)th_info.cpu_usage / TH_USAGE_SCALE;
process_user_time += thread_user_time;
process_system_time += thread_system_time;
//process_percent += th_percent;
}
}
for (i = 0; i < th_count; i++)
mach_port_deallocate(task, th_array[i]);
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
process_user_time += t_info.user_time.seconds + t_info.user_time.microseconds / 1e6;
process_system_time += t_info.system_time.seconds + t_info.system_time.microseconds / 1e6;
if (pos == 10 && sum == TRUE)
return (gint64)((process_user_time + process_system_time) * 10000000);
else if (pos == 10)
return (gint64)(process_user_time * 10000000);
else if (pos == 11)
return (gint64)(process_system_time * 10000000);
return 0;
#else
char buf [512];
char *s, *end;
FILE *f;
size_t len;
int i;
gint64 value;
g_snprintf (buf, sizeof (buf), "/proc/%d/stat", pid);
f = fopen (buf, "r");
if (!f)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
len = fread (buf, 1, sizeof (buf), f);
fclose (f);
if (len <= 0)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
s = strchr (buf, ')');
if (!s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
s++;
while (g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
/* skip the status char */
while (*s && !g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
for (i = 0; i < pos; ++i) {
while (g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
while (*s && !g_ascii_isspace (*s)) s++;
if (!*s)
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
/* we are finally at the needed item */
value = strtoul (s, &end, 0);
/* add also the following value */
	if (sum) {
		/* advance past the value just parsed so we add the following field, not the same one twice */
		s = end;
		while (g_ascii_isspace (*s)) s++;
		if (!*s)
			RET_ERROR (MONO_PROCESS_ERROR_OTHER);
		value += strtoul (s, &end, 0);
}
if (error)
*error = MONO_PROCESS_ERROR_NONE;
return value;
#endif
}
static int
get_user_hz (void)
{
static int user_hz = 0;
if (user_hz == 0) {
#if defined (_SC_CLK_TCK) && defined (HAVE_SYSCONF)
user_hz = sysconf (_SC_CLK_TCK);
#endif
if (user_hz == 0)
user_hz = 100;
}
return user_hz;
}
static gint64
get_process_stat_time (int pid, int pos, int sum, MonoProcessError *error)
{
gint64 val = get_process_stat_item (pid, pos, sum, error);
#if defined(__APPLE__)
return val;
#else
/* return 100ns ticks */
return (val * 10000000) / get_user_hz ();
#endif
}
static gint64
get_pid_status_item (int pid, const char *item, MonoProcessError *error, int multiplier)
{
#if defined(__APPLE__)
// ignore the multiplier
gint64 ret;
task_t task;
task_vm_info_data_t t_info;
mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
kern_return_t mach_ret;
if (pid == getpid ()) {
/* task_for_pid () doesn't work on ios, even for the current process */
task = mach_task_self ();
} else {
do {
mach_ret = task_for_pid (mach_task_self (), pid, &task);
} while (mach_ret == KERN_ABORTED);
if (mach_ret != KERN_SUCCESS)
RET_ERROR (MONO_PROCESS_ERROR_NOT_FOUND);
}
do {
mach_ret = task_info (task, TASK_VM_INFO, (task_info_t)&t_info, &info_count);
} while (mach_ret == KERN_ABORTED);
if (mach_ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
if(strcmp (item, "VmData") == 0)
ret = t_info.internal + t_info.compressed;
else if (strcmp (item, "VmRSS") == 0)
ret = t_info.resident_size;
else if(strcmp (item, "VmHWM") == 0)
ret = t_info.resident_size_peak;
else if (strcmp (item, "VmSize") == 0 || strcmp (item, "VmPeak") == 0)
ret = t_info.virtual_size;
else if (strcmp (item, "Threads") == 0) {
struct task_basic_info t_info;
mach_msg_type_number_t th_count = TASK_BASIC_INFO_COUNT;
do {
mach_ret = task_info (task, TASK_BASIC_INFO, (task_info_t)&t_info, &th_count);
} while (mach_ret == KERN_ABORTED);
if (mach_ret != KERN_SUCCESS) {
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
RET_ERROR (MONO_PROCESS_ERROR_OTHER);
}
ret = th_count;
} else if (strcmp (item, "VmSwap") == 0)
ret = t_info.compressed;
else
ret = 0;
if (pid != getpid ())
mach_port_deallocate (mach_task_self (), task);
return ret;
#else
char buf [64];
char *s;
s = get_pid_status_item_buf (pid, item, buf, sizeof (buf), error);
if (s)
return ((gint64) atol (s)) * multiplier;
return 0;
#endif
}
/**
* mono_process_get_data:
* \param pid pid of the process
* \param data description of data to return
* \returns a data item of a process like user time, memory use etc.,
* according to the \p data argument.
*/
gint64
mono_process_get_data_with_error (gpointer pid, MonoProcessData data, MonoProcessError *error)
{
gint64 val;
int rpid = GPOINTER_TO_INT (pid);
if (error)
*error = MONO_PROCESS_ERROR_OTHER;
switch (data) {
case MONO_PROCESS_NUM_THREADS:
return get_pid_status_item (rpid, "Threads", error, 1);
case MONO_PROCESS_USER_TIME:
return get_process_stat_time (rpid, 10, FALSE, error);
case MONO_PROCESS_SYSTEM_TIME:
return get_process_stat_time (rpid, 11, FALSE, error);
case MONO_PROCESS_TOTAL_TIME:
return get_process_stat_time (rpid, 10, TRUE, error);
case MONO_PROCESS_WORKING_SET:
return get_pid_status_item (rpid, "VmRSS", error, 1024);
case MONO_PROCESS_WORKING_SET_PEAK:
val = get_pid_status_item (rpid, "VmHWM", error, 1024);
if (val == 0)
val = get_pid_status_item (rpid, "VmRSS", error, 1024);
return val;
case MONO_PROCESS_PRIVATE_BYTES:
return get_pid_status_item (rpid, "VmData", error, 1024);
case MONO_PROCESS_VIRTUAL_BYTES:
return get_pid_status_item (rpid, "VmSize", error, 1024);
case MONO_PROCESS_VIRTUAL_BYTES_PEAK:
val = get_pid_status_item (rpid, "VmPeak", error, 1024);
if (val == 0)
val = get_pid_status_item (rpid, "VmSize", error, 1024);
return val;
case MONO_PROCESS_FAULTS:
return get_process_stat_item (rpid, 6, TRUE, error);
case MONO_PROCESS_ELAPSED:
return get_process_stat_time (rpid, 18, FALSE, error);
case MONO_PROCESS_PPID:
return get_process_stat_time (rpid, 0, FALSE, error);
case MONO_PROCESS_PAGED_BYTES:
return get_pid_status_item (rpid, "VmSwap", error, 1024);
/* Nothing yet */
case MONO_PROCESS_END:
return 0;
}
return 0;
}
gint64
mono_process_get_data (gpointer pid, MonoProcessData data)
{
MonoProcessError error;
return mono_process_get_data_with_error (pid, data, &error);
}
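/*
 * Minimal usage sketch for mono_process_get_data (): report the resident set
 * size and the CPU times of the current process. Illustrative only; assumes
 * the declarations from mono-proclib.h (including mono_process_current_pid)
 * are in scope.
 */
static G_GNUC_UNUSED void
example_report_self_usage (void)
{
	gpointer pid = GINT_TO_POINTER (mono_process_current_pid ());
	g_print ("rss: %" PRId64 " bytes\n", mono_process_get_data (pid, MONO_PROCESS_WORKING_SET));
	g_print ("user time: %" PRId64 "\n", mono_process_get_data (pid, MONO_PROCESS_USER_TIME));
	g_print ("system time: %" PRId64 "\n", mono_process_get_data (pid, MONO_PROCESS_SYSTEM_TIME));
}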
#ifndef HOST_WIN32
int
mono_process_current_pid ()
{
#if defined(HAVE_GETPID)
return (int) getpid ();
#elif defined(HOST_WASI)
return 0;
#else
#error getpid
#endif
}
#endif /* !HOST_WIN32 */
/**
* mono_cpu_count:
* \returns the number of processors on the system.
*/
#ifndef HOST_WIN32
int
mono_cpu_count (void)
{
#ifdef HOST_ANDROID
/* Android tries really hard to save power by powering off CPUs on SMP phones which
* means the normal way to query the cpu count returns a wrong value through the userspace API.
* Instead we use /sys entries to query the actual hardware CPU count.
*/
int count = 0;
char buffer[8] = {'\0'};
int present = open ("/sys/devices/system/cpu/present", O_RDONLY);
/* Format of the /sys entry is a cpulist of indexes which in the case
* of present is always of the form "0-(n-1)" when there is more than
* 1 core, n being the number of CPU cores in the system. Otherwise
* the value is simply 0
*/
if (present != -1 && read (present, (char*)buffer, sizeof (buffer)) > 3)
count = strtol (((char*)buffer) + 2, NULL, 10);
if (present != -1)
close (present);
if (count > 0)
return count + 1;
#endif
#if defined(HOST_ARM) || defined (HOST_ARM64)
/*
* Recap from Alexander Köplinger <[email protected]>:
*
* When we merged the change from PR #2722, we started seeing random failures on ARM in
* the MonoTests.System.Threading.ThreadPoolTests.SetAndGetMaxThreads and
* MonoTests.System.Threading.ManualResetEventSlimTests.Constructor_Defaults tests. Both
* of those tests are dealing with Environment.ProcessorCount to verify some implementation
* details.
*
* It turns out that on the Jetson TK1 board we use on public Jenkins and on ARM kernels
* in general, the value returned by sched_getaffinity (or _SC_NPROCESSORS_ONLN) doesn't
* contain CPUs/cores that are powered off for power saving reasons. This is contrary to
* what happens on x86, where even cores in deep-sleep state are returned [1], [2]. This
* means that we would get a processor count of 1 at one point in time and a higher value
* when load increases later on as the system wakes CPUs.
*
* Various runtime pieces like the threadpool and also user code however rely on the
* value returned by Environment.ProcessorCount e.g. for deciding how many parallel tasks
* to start, thereby limiting the performance when that code thinks we only have one CPU.
*
* Talking to a few people, this was the reason why we changed to _SC_NPROCESSORS_CONF in
* mono#1688 and why we added a special case for Android in mono@de3addc to get the "real"
* number of processors in the system.
*
* Because of those issues Android/Dalvik also switched from _ONLN to _SC_NPROCESSORS_CONF
* for the Java API Runtime.availableProcessors() too [3], citing:
* > Traditionally this returned the number currently online, but many mobile devices are
* able to take unused cores offline to save power, so releases newer than Android 4.2 (Jelly
* Bean) return the maximum number of cores that could be made available if there were no
* power or heat constraints.
*
* The problem with sticking to _SC_NPROCESSORS_CONF however is that it breaks down in
* constrained environments like Docker or with an explicit CPU affinity set by the Linux
* `taskset` command: they'd get a higher CPU count than can be used, start more threads etc.
* which results in unnecessary context switches and overloaded systems. That's why we need
* to respect sched_getaffinity.
*
* So while in an ideal world we would be able to rely on sched_getaffinity/_SC_NPROCESSORS_ONLN
* to return the number of theoretically available CPUs regardless of power saving measures
* everywhere, we can't do this on ARM.
*
* I think the pragmatic solution is the following:
* * use sched_getaffinity (+ fallback to _SC_NPROCESSORS_ONLN in case of error) on x86. This
* ensures we're in line with what OpenJDK [4] and CoreCLR [5] do
* * use _SC_NPROCESSORS_CONF exclusively on ARM (I think we could eventually even get rid of
* the HOST_ANDROID special case)
*
* Helpful links:
*
* [1] https://sourceware.org/ml/libc-alpha/2013-07/msg00383.html
* [2] https://lists.01.org/pipermail/powertop/2012-September/000433.html
* [3] https://android.googlesource.com/platform/libcore/+/750dc634e56c58d1d04f6a138734ac2b772900b5%5E1..750dc634e56c58d1d04f6a138734ac2b772900b5/
* [4] https://bugs.openjdk.java.net/browse/JDK-6515172
* [5] https://github.com/dotnet/coreclr/blob/7058273693db2555f127ce16e6b0c5b40fb04867/src/pal/src/misc/sysinfo.cpp#L148
*/
#if defined (_SC_NPROCESSORS_CONF) && defined (HAVE_SYSCONF)
{
int count = sysconf (_SC_NPROCESSORS_CONF);
if (count > 0)
return count;
}
#endif
#else
#ifdef HAVE_SCHED_GETAFFINITY
{
cpu_set_t set;
if (sched_getaffinity (mono_process_current_pid (), sizeof (set), &set) == 0)
return CPU_COUNT (&set);
}
#endif
#if defined (_SC_NPROCESSORS_ONLN) && defined (HAVE_SYSCONF)
{
int count = sysconf (_SC_NPROCESSORS_ONLN);
if (count > 0)
return count;
}
#endif
#endif /* defined(HOST_ARM) || defined (HOST_ARM64) */
#ifdef USE_SYSCTL
{
int count;
int mib [2];
size_t len = sizeof (int);
mib [0] = CTL_HW;
mib [1] = HW_NCPU;
if (sysctl (mib, 2, &count, &len, NULL, 0) == 0)
return count;
}
#endif
/* FIXME: warn */
return 1;
}
#endif /* !HOST_WIN32 */
static void
get_cpu_times (int cpu_id, gint64 *user, gint64 *systemt, gint64 *irq, gint64 *sirq, gint64 *idle)
{
char buf [256];
char *s;
int uhz = get_user_hz ();
guint64 user_ticks = 0, nice_ticks = 0, system_ticks = 0, idle_ticks = 0, irq_ticks = 0, sirq_ticks = 0;
FILE *f = fopen ("/proc/stat", "r");
if (!f)
return;
if (cpu_id < 0)
uhz *= mono_cpu_count ();
while ((s = fgets (buf, sizeof (buf), f))) {
char *data = NULL;
if (cpu_id < 0 && strncmp (s, "cpu", 3) == 0 && g_ascii_isspace (s [3])) {
data = s + 4;
} else if (cpu_id >= 0 && strncmp (s, "cpu", 3) == 0 && strtol (s + 3, &data, 10) == cpu_id) {
if (data == s + 3)
continue;
data++;
} else {
continue;
}
user_ticks = strtoull (data, &data, 10);
nice_ticks = strtoull (data, &data, 10);
system_ticks = strtoull (data, &data, 10);
idle_ticks = strtoull (data, &data, 10);
/* iowait_ticks = strtoull (data, &data, 10); */
irq_ticks = strtoull (data, &data, 10);
sirq_ticks = strtoull (data, &data, 10);
break;
}
fclose (f);
if (user)
*user = (user_ticks + nice_ticks) * 10000000 / uhz;
if (systemt)
*systemt = (system_ticks) * 10000000 / uhz;
if (irq)
*irq = (irq_ticks) * 10000000 / uhz;
if (sirq)
*sirq = (sirq_ticks) * 10000000 / uhz;
if (idle)
*idle = (idle_ticks) * 10000000 / uhz;
}
/**
* mono_cpu_get_data:
* \param cpu_id processor number or -1 to get a summary of all the processors
* \param data type of data to retrieve
* Get data about a processor on the system, like time spent in user space or idle time.
*/
gint64
mono_cpu_get_data (int cpu_id, MonoCpuData data, MonoProcessError *error)
{
gint64 value = 0;
if (error)
*error = MONO_PROCESS_ERROR_NONE;
switch (data) {
case MONO_CPU_USER_TIME:
get_cpu_times (cpu_id, &value, NULL, NULL, NULL, NULL);
break;
case MONO_CPU_PRIV_TIME:
get_cpu_times (cpu_id, NULL, &value, NULL, NULL, NULL);
break;
case MONO_CPU_INTR_TIME:
get_cpu_times (cpu_id, NULL, NULL, &value, NULL, NULL);
break;
case MONO_CPU_DCP_TIME:
get_cpu_times (cpu_id, NULL, NULL, NULL, &value, NULL);
break;
case MONO_CPU_IDLE_TIME:
get_cpu_times (cpu_id, NULL, NULL, NULL, NULL, &value);
break;
case MONO_CPU_END:
/* Nothing yet */
return 0;
}
return value;
}
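/*
 * Minimal usage sketch for mono_cpu_count () and mono_cpu_get_data (): print
 * the accumulated user and idle time (100ns units) for the summary row (-1)
 * and for each processor. Illustrative only; assumes the declarations from
 * mono-proclib.h are in scope.
 */
static G_GNUC_UNUSED void
example_dump_cpu_times (void)
{
	int i, ncpus = mono_cpu_count ();
	for (i = -1; i < ncpus; ++i) {
		MonoProcessError error;
		gint64 user = mono_cpu_get_data (i, MONO_CPU_USER_TIME, &error);
		gint64 idle = mono_cpu_get_data (i, MONO_CPU_IDLE_TIME, &error);
		g_print ("cpu %d: user=%" PRId64 " idle=%" PRId64 "\n", i, user, idle);
	}
}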
int
mono_atexit (void (*func)(void))
{
#if defined(HOST_ANDROID) || !defined(HAVE_ATEXIT)
/* Some versions of the Android libc don't define atexit () */
return 0;
#else
return atexit (func);
#endif
}
#ifndef HOST_WIN32
gboolean
mono_pe_file_time_date_stamp (const gunichar2 *filename, guint32 *out)
{
void *map_handle;
guint32 map_size;
gpointer file_map = mono_pe_file_map (filename, &map_size, &map_handle);
if (!file_map)
return FALSE;
/* Figure this out when we support 64bit PE files */
if (1) {
IMAGE_DOS_HEADER *dos_header = (IMAGE_DOS_HEADER *)file_map;
if (dos_header->e_magic != IMAGE_DOS_SIGNATURE) {
mono_pe_file_unmap (file_map, map_handle);
return FALSE;
}
IMAGE_NT_HEADERS32 *nt_headers = (IMAGE_NT_HEADERS32 *)((guint8 *)file_map + GUINT32_FROM_LE (dos_header->e_lfanew));
if (nt_headers->Signature != IMAGE_NT_SIGNATURE) {
mono_pe_file_unmap (file_map, map_handle);
return FALSE;
}
*out = nt_headers->FileHeader.TimeDateStamp;
} else {
g_assert_not_reached ();
}
mono_pe_file_unmap (file_map, map_handle);
return TRUE;
}
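/*
 * Minimal usage sketch for mono_pe_file_time_date_stamp (): read the COFF
 * TimeDateStamp of an image. Illustrative only; `path_utf16` is a hypothetical
 * caller-supplied UTF-16 path, not something defined in this file.
 */
static G_GNUC_UNUSED void
example_print_pe_timestamp (const gunichar2 *path_utf16)
{
	guint32 stamp = 0;
	if (mono_pe_file_time_date_stamp (path_utf16, &stamp))
		g_print ("TimeDateStamp: 0x%08x\n", (unsigned int) stamp);
	else
		g_print ("not a readable 32-bit PE image\n");
}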
gpointer
mono_pe_file_map (const gunichar2 *filename, guint32 *map_size, void **handle)
{
gchar *filename_ext = NULL;
gchar *located_filename = NULL;
guint64 fsize = 0;
gpointer file_map = NULL;
ERROR_DECL (error);
MonoFileMap *filed = NULL;
/* According to the MSDN docs, a search path is applied to
* filename. FIXME: implement this, for now just pass it
* straight to open
*/
filename_ext = mono_unicode_to_external_checked (filename, error);
// This block was added to diagnose https://github.com/mono/mono/issues/14730, remove after resolved
if (G_UNLIKELY (filename_ext == NULL)) {
GString *raw_bytes = g_string_new (NULL);
const gunichar2 *p = filename;
while (*p)
g_string_append_printf (raw_bytes, "%04X ", *p++);
g_assertf (filename_ext != NULL, "%s: unicode conversion returned NULL; %s; input was: %s", __func__, mono_error_get_message (error), raw_bytes->str);
g_string_free (raw_bytes, TRUE);
}
if (filename_ext == NULL) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: unicode conversion returned NULL; %s", __func__, mono_error_get_message (error));
mono_error_cleanup (error);
goto exit;
}
if ((filed = mono_file_map_open (filename_ext)) == NULL) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: Error opening file %s (3): %s", __func__, filename_ext, strerror (errno));
goto exit;
}
fsize = mono_file_map_size (filed);
if (fsize == 0) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: Error stat()ing file %s: %s", __func__, filename_ext, strerror (errno));
goto exit;
}
g_assert (fsize <= G_MAXUINT32);
*map_size = fsize;
/* Check basic file size */
if (fsize < sizeof(IMAGE_DOS_HEADER)) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: File %s is too small: %" PRId64, __func__, filename_ext, fsize);
goto exit;
}
file_map = mono_file_map (fsize, MONO_MMAP_READ | MONO_MMAP_PRIVATE, mono_file_map_fd (filed), 0, handle);
if (file_map == NULL) {
mono_trace (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER_PROCESS, "%s: Error mmap()ing file %s: %s", __func__, filename_ext, strerror (errno));
goto exit;
}
exit:
if (filed)
mono_file_map_close (filed);
g_free (located_filename);
g_free (filename_ext);
return file_map;
}
void
mono_pe_file_unmap (gpointer file_map, void *handle)
{
gint res;
res = mono_file_unmap (file_map, handle);
if (G_UNLIKELY (res != 0))
g_error ("%s: mono_file_unmap failed, error: \"%s\" (%d)", __func__, g_strerror (errno), errno);
}
#endif /* HOST_WIN32 */
/*
* This function returns the cpu usage in percentage,
* normalized on the number of cores.
*
* Warning: the percentage returned can be > 100%. This
* might happen on systems like Android which, for
* battery and performance reasons, shut down cores and
* lie about the number of active cores.
*/
#ifndef HOST_WIN32
gint32
mono_cpu_usage (MonoCpuUsageState *prev)
{
gint32 cpu_usage = 0;
#ifdef HAVE_GETRUSAGE
gint64 cpu_total_time;
gint64 cpu_busy_time;
struct rusage resource_usage;
gint64 current_time;
gint64 kernel_time;
gint64 user_time;
if (getrusage (RUSAGE_SELF, &resource_usage) == -1) {
g_error ("getrusage() failed, errno is %d (%s)\n", errno, strerror (errno));
return -1;
}
current_time = mono_100ns_ticks ();
kernel_time = resource_usage.ru_stime.tv_sec * 1000 * 1000 * 10 + resource_usage.ru_stime.tv_usec * 10;
user_time = resource_usage.ru_utime.tv_sec * 1000 * 1000 * 10 + resource_usage.ru_utime.tv_usec * 10;
cpu_busy_time = (user_time - (prev ? prev->user_time : 0)) + (kernel_time - (prev ? prev->kernel_time : 0));
cpu_total_time = (current_time - (prev ? prev->current_time : 0)) * mono_cpu_count ();
if (prev) {
prev->kernel_time = kernel_time;
prev->user_time = user_time;
prev->current_time = current_time;
}
if (cpu_total_time > 0 && cpu_busy_time > 0)
cpu_usage = (gint32)(cpu_busy_time * 100 / cpu_total_time);
#endif
return cpu_usage;
}
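/*
 * Minimal usage sketch for mono_cpu_usage (): prime the state, wait a short
 * interval, then read the usage accumulated over that interval. Illustrative
 * only; assumes MonoCpuUsageState and g_usleep () are available, as declared
 * in mono-proclib.h and glib respectively.
 */
static G_GNUC_UNUSED void
example_report_cpu_usage (void)
{
	MonoCpuUsageState state;
	memset (&state, 0, sizeof (state));
	mono_cpu_usage (&state);          /* record the baseline counters */
	g_usleep (500 * 1000);            /* sample over roughly 500 ms */
	g_print ("cpu usage: %d%%\n", mono_cpu_usage (&state));
}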
#endif /* !HOST_WIN32 */
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/Regression/JitBlue/GitHub_17969/GitHub_17969.ilproj | <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).il" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>None</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(MSBuildProjectName).il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Configuration.ConfigurationManager/src/System/Configuration/ExeContext.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Configuration
{
public sealed class ExeContext
{
internal ExeContext(ConfigurationUserLevel userContext,
string exePath)
{
UserLevel = userContext;
ExePath = exePath;
}
// The ConfigurationUserLevel that we are running within.
//
// Note: ConfigurationUserLevel.None will be set for machine.config
// and the applicationconfig file. Use IsMachineConfig in
// ConfigurationContext, to determine the difference.
public ConfigurationUserLevel UserLevel { get; }
public string ExePath { get; }
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Configuration
{
public sealed class ExeContext
{
internal ExeContext(ConfigurationUserLevel userContext,
string exePath)
{
UserLevel = userContext;
ExePath = exePath;
}
// The ConfigurationUserLevel that we are running within.
//
// Note: ConfigurationUserLevel.None will be set for machine.config
// and the applicationconfig file. Use IsMachineConfig in
// ConfigurationContext, to determine the difference.
public ConfigurationUserLevel UserLevel { get; }
public string ExePath { get; }
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/baseservices/exceptions/regressions/Dev11/147911/test147911.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
public class Class1
{
[DllImport("fpcw.dll")]
private static extern int RaiseFPException();
public static int Main(string[] args)
{
int retVal = RaiseFPException();
return ( retVal==100 ) ? 100 : 101;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
public class Class1
{
[DllImport("fpcw.dll")]
private static extern int RaiseFPException();
public static int Main(string[] args)
{
int retVal = RaiseFPException();
return ( retVal==100 ) ? 100 : 101;
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.ComponentModel.Composition.Registration/ref/System.ComponentModel.Composition.Registration.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(NetCoreAppCurrent);$(NetCoreAppMinimum);netstandard2.1</TargetFrameworks>
</PropertyGroup>
<ItemGroup>
<Compile Include="System.ComponentModel.Composition.Registration.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="$(LibrariesProjectRoot)System.ComponentModel.Composition\ref\System.ComponentModel.Composition.csproj" />
<ProjectReference Include="$(LibrariesProjectRoot)System.Reflection.Context\ref\System.Reflection.Context.csproj" />
</ItemGroup>
<ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp' and '$(TargetFramework)' != '$(NetCoreAppCurrent)'">
<Reference Include="System.Linq.Expressions" />
<Reference Include="System.Runtime" />
</ItemGroup>
</Project> | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(NetCoreAppCurrent);$(NetCoreAppMinimum);netstandard2.1</TargetFrameworks>
</PropertyGroup>
<ItemGroup>
<Compile Include="System.ComponentModel.Composition.Registration.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="$(LibrariesProjectRoot)System.ComponentModel.Composition\ref\System.ComponentModel.Composition.csproj" />
<ProjectReference Include="$(LibrariesProjectRoot)System.Reflection.Context\ref\System.Reflection.Context.csproj" />
</ItemGroup>
<ItemGroup Condition="'$(TargetFrameworkIdentifier)' == '.NETCoreApp' and '$(TargetFramework)' != '$(NetCoreAppCurrent)'">
<Reference Include="System.Linq.Expressions" />
<Reference Include="System.Runtime" />
</ItemGroup>
</Project> | -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Linq.Parallel/src/System/Linq/Parallel/QueryOperators/Unary/IndexedWhereQueryOperator.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// IndexedWhereQueryOperator.cs
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Threading;
namespace System.Linq.Parallel
{
/// <summary>
/// A variant of the Where operator that supplies element index while performing the
/// filtering operation. This requires cooperation with partitioning and merging to
/// guarantee ordering is preserved.
///
/// </summary>
/// <typeparam name="TInputOutput"></typeparam>
internal sealed class IndexedWhereQueryOperator<TInputOutput> : UnaryQueryOperator<TInputOutput, TInputOutput>
{
// Predicate function. Used to filter out non-matching elements during execution.
private readonly Func<TInputOutput, int, bool> _predicate;
private bool _prematureMerge; // Whether to prematurely merge the input of this operator.
private bool _limitsParallelism; // Whether this operator limits parallelism
//---------------------------------------------------------------------------------------
// Initializes a new where operator.
//
// Arguments:
// child - the child operator or data source from which to pull data
// predicate - a delegate representing the predicate function
//
// Assumptions:
// predicate must be non null.
//
internal IndexedWhereQueryOperator(IEnumerable<TInputOutput> child,
Func<TInputOutput, int, bool> predicate)
: base(child)
{
Debug.Assert(child != null, "child data source cannot be null");
Debug.Assert(predicate != null, "need a filter function");
_predicate = predicate;
// In an indexed Select, elements must be returned in the order in which
// indices were assigned.
_outputOrdered = true;
InitOrdinalIndexState();
}
private void InitOrdinalIndexState()
{
OrdinalIndexState childIndexState = Child.OrdinalIndexState;
if (ExchangeUtilities.IsWorseThan(childIndexState, OrdinalIndexState.Correct))
{
_prematureMerge = true;
_limitsParallelism = childIndexState != OrdinalIndexState.Shuffled;
}
SetOrdinalIndexState(OrdinalIndexState.Increasing);
}
//---------------------------------------------------------------------------------------
// Just opens the current operator, including opening the child and wrapping it with
// partitions as needed.
//
internal override QueryResults<TInputOutput> Open(
QuerySettings settings, bool preferStriping)
{
QueryResults<TInputOutput> childQueryResults = Child.Open(settings, preferStriping);
return new UnaryQueryOperatorResults(childQueryResults, this, settings, preferStriping);
}
internal override void WrapPartitionedStream<TKey>(
PartitionedStream<TInputOutput, TKey> inputStream, IPartitionedStreamRecipient<TInputOutput> recipient, bool preferStriping, QuerySettings settings)
{
int partitionCount = inputStream.PartitionCount;
// If the index is not correct, we need to reindex.
PartitionedStream<TInputOutput, int> inputStreamInt;
if (_prematureMerge)
{
ListQueryResults<TInputOutput> listResults = ExecuteAndCollectResults(inputStream, partitionCount, Child.OutputOrdered, preferStriping, settings);
inputStreamInt = listResults.GetPartitionedStream();
}
else
{
Debug.Assert(typeof(TKey) == typeof(int));
inputStreamInt = (PartitionedStream<TInputOutput, int>)(object)inputStream;
}
// Since the index is correct, the type of the index must be int
PartitionedStream<TInputOutput, int> outputStream =
new PartitionedStream<TInputOutput, int>(partitionCount, Util.GetDefaultComparer<int>(), OrdinalIndexState);
for (int i = 0; i < partitionCount; i++)
{
outputStream[i] = new IndexedWhereQueryOperatorEnumerator(inputStreamInt[i], _predicate, settings.CancellationState.MergedCancellationToken);
}
recipient.Receive(outputStream);
}
//---------------------------------------------------------------------------------------
// Returns an enumerable that represents the query executing sequentially.
//
internal override IEnumerable<TInputOutput> AsSequentialQuery(CancellationToken token)
{
IEnumerable<TInputOutput> wrappedChild = CancellableEnumerable.Wrap(Child.AsSequentialQuery(token), token);
return wrappedChild.Where(_predicate);
}
//---------------------------------------------------------------------------------------
// Whether this operator performs a premature merge that would not be performed in
// a similar sequential operation (i.e., in LINQ to Objects).
//
internal override bool LimitsParallelism
{
get { return _limitsParallelism; }
}
//-----------------------------------------------------------------------------------
// An enumerator that implements the filtering logic.
//
private sealed class IndexedWhereQueryOperatorEnumerator : QueryOperatorEnumerator<TInputOutput, int>
{
private readonly QueryOperatorEnumerator<TInputOutput, int> _source; // The data source to enumerate.
private readonly Func<TInputOutput, int, bool> _predicate; // The predicate used for filtering.
private readonly CancellationToken _cancellationToken;
private Shared<int>? _outputLoopCount;
//-----------------------------------------------------------------------------------
// Instantiates a new enumerator.
//
internal IndexedWhereQueryOperatorEnumerator(QueryOperatorEnumerator<TInputOutput, int> source, Func<TInputOutput, int, bool> predicate,
CancellationToken cancellationToken)
{
Debug.Assert(source != null);
Debug.Assert(predicate != null);
_source = source;
_predicate = predicate;
_cancellationToken = cancellationToken;
}
//-----------------------------------------------------------------------------------
// Moves to the next matching element in the underlying data stream.
//
internal override bool MoveNext([MaybeNullWhen(false), AllowNull] ref TInputOutput currentElement, ref int currentKey)
{
Debug.Assert(_predicate != null, "expected a compiled operator");
// Iterate through the input until we reach the end of the sequence or find
// an element matching the predicate.
if (_outputLoopCount == null)
_outputLoopCount = new Shared<int>(0);
while (_source.MoveNext(ref currentElement!, ref currentKey))
{
if ((_outputLoopCount.Value++ & CancellationState.POLL_INTERVAL) == 0)
_cancellationToken.ThrowIfCancellationRequested();
if (_predicate(currentElement, currentKey))
{
return true;
}
}
return false;
}
protected override void Dispose(bool disposing)
{
Debug.Assert(_source != null);
_source.Dispose();
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
//
// IndexedWhereQueryOperator.cs
//
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Threading;
namespace System.Linq.Parallel
{
/// <summary>
/// A variant of the Where operator that supplies element index while performing the
/// filtering operation. This requires cooperation with partitioning and merging to
/// guarantee ordering is preserved.
///
/// </summary>
/// <typeparam name="TInputOutput"></typeparam>
internal sealed class IndexedWhereQueryOperator<TInputOutput> : UnaryQueryOperator<TInputOutput, TInputOutput>
{
// Predicate function. Used to filter out non-matching elements during execution.
private readonly Func<TInputOutput, int, bool> _predicate;
private bool _prematureMerge; // Whether to prematurely merge the input of this operator.
private bool _limitsParallelism; // Whether this operator limits parallelism
//---------------------------------------------------------------------------------------
// Initializes a new where operator.
//
// Arguments:
// child - the child operator or data source from which to pull data
// predicate - a delegate representing the predicate function
//
// Assumptions:
// predicate must be non null.
//
internal IndexedWhereQueryOperator(IEnumerable<TInputOutput> child,
Func<TInputOutput, int, bool> predicate)
: base(child)
{
Debug.Assert(child != null, "child data source cannot be null");
Debug.Assert(predicate != null, "need a filter function");
_predicate = predicate;
// In an indexed Select, elements must be returned in the order in which
// indices were assigned.
_outputOrdered = true;
InitOrdinalIndexState();
}
private void InitOrdinalIndexState()
{
OrdinalIndexState childIndexState = Child.OrdinalIndexState;
if (ExchangeUtilities.IsWorseThan(childIndexState, OrdinalIndexState.Correct))
{
_prematureMerge = true;
_limitsParallelism = childIndexState != OrdinalIndexState.Shuffled;
}
SetOrdinalIndexState(OrdinalIndexState.Increasing);
}
//---------------------------------------------------------------------------------------
// Just opens the current operator, including opening the child and wrapping it with
// partitions as needed.
//
internal override QueryResults<TInputOutput> Open(
QuerySettings settings, bool preferStriping)
{
QueryResults<TInputOutput> childQueryResults = Child.Open(settings, preferStriping);
return new UnaryQueryOperatorResults(childQueryResults, this, settings, preferStriping);
}
internal override void WrapPartitionedStream<TKey>(
PartitionedStream<TInputOutput, TKey> inputStream, IPartitionedStreamRecipient<TInputOutput> recipient, bool preferStriping, QuerySettings settings)
{
int partitionCount = inputStream.PartitionCount;
// If the index is not correct, we need to reindex.
PartitionedStream<TInputOutput, int> inputStreamInt;
if (_prematureMerge)
{
ListQueryResults<TInputOutput> listResults = ExecuteAndCollectResults(inputStream, partitionCount, Child.OutputOrdered, preferStriping, settings);
inputStreamInt = listResults.GetPartitionedStream();
}
else
{
Debug.Assert(typeof(TKey) == typeof(int));
inputStreamInt = (PartitionedStream<TInputOutput, int>)(object)inputStream;
}
// Since the index is correct, the type of the index must be int
PartitionedStream<TInputOutput, int> outputStream =
new PartitionedStream<TInputOutput, int>(partitionCount, Util.GetDefaultComparer<int>(), OrdinalIndexState);
for (int i = 0; i < partitionCount; i++)
{
outputStream[i] = new IndexedWhereQueryOperatorEnumerator(inputStreamInt[i], _predicate, settings.CancellationState.MergedCancellationToken);
}
recipient.Receive(outputStream);
}
//---------------------------------------------------------------------------------------
// Returns an enumerable that represents the query executing sequentially.
//
internal override IEnumerable<TInputOutput> AsSequentialQuery(CancellationToken token)
{
IEnumerable<TInputOutput> wrappedChild = CancellableEnumerable.Wrap(Child.AsSequentialQuery(token), token);
return wrappedChild.Where(_predicate);
}
//---------------------------------------------------------------------------------------
// Whether this operator performs a premature merge that would not be performed in
// a similar sequential operation (i.e., in LINQ to Objects).
//
internal override bool LimitsParallelism
{
get { return _limitsParallelism; }
}
//-----------------------------------------------------------------------------------
// An enumerator that implements the filtering logic.
//
private sealed class IndexedWhereQueryOperatorEnumerator : QueryOperatorEnumerator<TInputOutput, int>
{
private readonly QueryOperatorEnumerator<TInputOutput, int> _source; // The data source to enumerate.
private readonly Func<TInputOutput, int, bool> _predicate; // The predicate used for filtering.
private readonly CancellationToken _cancellationToken;
private Shared<int>? _outputLoopCount;
//-----------------------------------------------------------------------------------
// Instantiates a new enumerator.
//
internal IndexedWhereQueryOperatorEnumerator(QueryOperatorEnumerator<TInputOutput, int> source, Func<TInputOutput, int, bool> predicate,
CancellationToken cancellationToken)
{
Debug.Assert(source != null);
Debug.Assert(predicate != null);
_source = source;
_predicate = predicate;
_cancellationToken = cancellationToken;
}
//-----------------------------------------------------------------------------------
// Moves to the next matching element in the underlying data stream.
//
internal override bool MoveNext([MaybeNullWhen(false), AllowNull] ref TInputOutput currentElement, ref int currentKey)
{
Debug.Assert(_predicate != null, "expected a compiled operator");
// Iterate through the input until we reach the end of the sequence or find
// an element matching the predicate.
if (_outputLoopCount == null)
_outputLoopCount = new Shared<int>(0);
while (_source.MoveNext(ref currentElement!, ref currentKey))
{
if ((_outputLoopCount.Value++ & CancellationState.POLL_INTERVAL) == 0)
_cancellationToken.ThrowIfCancellationRequested();
if (_predicate(currentElement, currentKey))
{
return true;
}
}
return false;
}
protected override void Dispose(bool disposing)
{
Debug.Assert(_source != null);
_source.Dispose();
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Security.Permissions/src/System/Security/Permissions/IUnrestrictedPermission.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Security.Permissions
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
public partial interface IUnrestrictedPermission
{
bool IsUnrestricted();
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Security.Permissions
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
public partial interface IUnrestrictedPermission
{
bool IsUnrestricted();
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/CollectionDebuggerProxy.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Diagnostics;
namespace System.Text.RegularExpressions
{
internal sealed class CollectionDebuggerProxy<T>
{
private readonly ICollection<T> _collection;
public CollectionDebuggerProxy(ICollection<T> collection!!) =>
_collection = collection;
[DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
public T[] Items
{
get
{
var items = new T[_collection.Count];
_collection.CopyTo(items, 0);
return items;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using System.Diagnostics;
namespace System.Text.RegularExpressions
{
internal sealed class CollectionDebuggerProxy<T>
{
private readonly ICollection<T> _collection;
public CollectionDebuggerProxy(ICollection<T> collection!!) =>
_collection = collection;
[DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
public T[] Items
{
get
{
var items = new T[_collection.Count];
_collection.CopyTo(items, 0);
return items;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MultiplySubtractByScalar.Vector128.UInt32.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void MultiplySubtractByScalar_Vector128_UInt32()
{
var test = new SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] inArray3, UInt32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt32, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<UInt32> _fld1;
public Vector128<UInt32> _fld2;
public Vector64<UInt32> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32 testClass)
{
var result = AdvSimd.MultiplySubtractByScalar(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32 testClass)
{
fixed (Vector128<UInt32>* pFld1 = &_fld1)
fixed (Vector128<UInt32>* pFld2 = &_fld2)
fixed (Vector64<UInt32>* pFld3 = &_fld3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt32*)(pFld2)),
AdvSimd.LoadVector64((UInt32*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<UInt32>>() / sizeof(UInt32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static UInt32[] _data2 = new UInt32[Op2ElementCount];
private static UInt32[] _data3 = new UInt32[Op3ElementCount];
private static Vector128<UInt32> _clsVar1;
private static Vector128<UInt32> _clsVar2;
private static Vector64<UInt32> _clsVar3;
private Vector128<UInt32> _fld1;
private Vector128<UInt32> _fld2;
private Vector64<UInt32> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _clsVar3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
}
public SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
_dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.MultiplySubtractByScalar(
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractByScalar), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector64<UInt32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractByScalar), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector64<UInt32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.MultiplySubtractByScalar(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<UInt32>* pClsVar1 = &_clsVar1)
fixed (Vector128<UInt32>* pClsVar2 = &_clsVar2)
fixed (Vector64<UInt32>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pClsVar1)),
AdvSimd.LoadVector128((UInt32*)(pClsVar2)),
AdvSimd.LoadVector64((UInt32*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray3Ptr);
var result = AdvSimd.MultiplySubtractByScalar(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray3Ptr));
var result = AdvSimd.MultiplySubtractByScalar(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32();
var result = AdvSimd.MultiplySubtractByScalar(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32();
fixed (Vector128<UInt32>* pFld1 = &test._fld1)
fixed (Vector128<UInt32>* pFld2 = &test._fld2)
fixed (Vector64<UInt32>* pFld3 = &test._fld3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt32*)(pFld2)),
AdvSimd.LoadVector64((UInt32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.MultiplySubtractByScalar(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<UInt32>* pFld1 = &_fld1)
fixed (Vector128<UInt32>* pFld2 = &_fld2)
fixed (Vector64<UInt32>* pFld3 = &_fld3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt32*)(pFld2)),
AdvSimd.LoadVector64((UInt32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.MultiplySubtractByScalar(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(&test._fld1)),
AdvSimd.LoadVector128((UInt32*)(&test._fld2)),
AdvSimd.LoadVector64((UInt32*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<UInt32> op1, Vector128<UInt32> op2, Vector64<UInt32> op3, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] inArray3 = new UInt32[Op3ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] inArray3 = new UInt32[Op3ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(UInt32[] firstOp, UInt32[] secondOp, UInt32[] thirdOp, UInt32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
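            // Reference check: each lane of MultiplySubtractByScalar (MLS-by-element) is expected to be
            // firstOp[i] - (secondOp[i] * thirdOp[0]) with UInt32 wrap-around, which is what
            // Helpers.MultiplySubtract computes below.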
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.MultiplySubtract(firstOp[i], secondOp[i], thirdOp[0]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplySubtractByScalar)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>, Vector64<UInt32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void MultiplySubtractByScalar_Vector128_UInt32()
{
var test = new SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] inArray3, UInt32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
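                // Copy the caller's data into the pinned, over-allocated buffers at their aligned offsets.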
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt32, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
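                // Round the pinned buffer pointer up to the next multiple of expectedAlignment (a power of two).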
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<UInt32> _fld1;
public Vector128<UInt32> _fld2;
public Vector64<UInt32> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32 testClass)
{
var result = AdvSimd.MultiplySubtractByScalar(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32 testClass)
{
fixed (Vector128<UInt32>* pFld1 = &_fld1)
fixed (Vector128<UInt32>* pFld2 = &_fld2)
fixed (Vector64<UInt32>* pFld3 = &_fld3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt32*)(pFld2)),
AdvSimd.LoadVector64((UInt32*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<UInt32>>() / sizeof(UInt32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt32>>() / sizeof(UInt32);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static UInt32[] _data2 = new UInt32[Op2ElementCount];
private static UInt32[] _data3 = new UInt32[Op3ElementCount];
private static Vector128<UInt32> _clsVar1;
private static Vector128<UInt32> _clsVar2;
private static Vector64<UInt32> _clsVar3;
private Vector128<UInt32> _fld1;
private Vector128<UInt32> _fld2;
private Vector64<UInt32> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _clsVar3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
}
public SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetUInt32(); }
_dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.MultiplySubtractByScalar(
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractByScalar), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector64<UInt32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplySubtractByScalar), new Type[] { typeof(Vector128<UInt32>), typeof(Vector128<UInt32>), typeof(Vector64<UInt32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.MultiplySubtractByScalar(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<UInt32>* pClsVar1 = &_clsVar1)
fixed (Vector128<UInt32>* pClsVar2 = &_clsVar2)
fixed (Vector64<UInt32>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pClsVar1)),
AdvSimd.LoadVector128((UInt32*)(pClsVar2)),
AdvSimd.LoadVector64((UInt32*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<UInt32>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray3Ptr);
var result = AdvSimd.MultiplySubtractByScalar(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((UInt32*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector64((UInt32*)(_dataTable.inArray3Ptr));
var result = AdvSimd.MultiplySubtractByScalar(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32();
var result = AdvSimd.MultiplySubtractByScalar(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__MultiplySubtractByScalar_Vector128_UInt32();
fixed (Vector128<UInt32>* pFld1 = &test._fld1)
fixed (Vector128<UInt32>* pFld2 = &test._fld2)
fixed (Vector64<UInt32>* pFld3 = &test._fld3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt32*)(pFld2)),
AdvSimd.LoadVector64((UInt32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.MultiplySubtractByScalar(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<UInt32>* pFld1 = &_fld1)
fixed (Vector128<UInt32>* pFld2 = &_fld2)
fixed (Vector64<UInt32>* pFld3 = &_fld3)
{
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(pFld1)),
AdvSimd.LoadVector128((UInt32*)(pFld2)),
AdvSimd.LoadVector64((UInt32*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.MultiplySubtractByScalar(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.MultiplySubtractByScalar(
AdvSimd.LoadVector128((UInt32*)(&test._fld1)),
AdvSimd.LoadVector128((UInt32*)(&test._fld2)),
AdvSimd.LoadVector64((UInt32*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<UInt32> op1, Vector128<UInt32> op2, Vector64<UInt32> op3, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] inArray3 = new UInt32[Op3ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
UInt32[] inArray2 = new UInt32[Op2ElementCount];
UInt32[] inArray3 = new UInt32[Op3ElementCount];
UInt32[] outArray = new UInt32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(UInt32[] firstOp, UInt32[] secondOp, UInt32[] thirdOp, UInt32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.MultiplySubtract(firstOp[i], secondOp[i], thirdOp[0]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplySubtractByScalar)}<UInt32>(Vector128<UInt32>, Vector128<UInt32>, Vector64<UInt32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/X86/AvxVnni_Vector128/MultiplyWideningAndAddSaturate.Int16.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
using System.Text.RegularExpressions;
namespace JIT.HardwareIntrinsics.X86
{
public static partial class Program
{
private static void MultiplyWideningAndAddSaturateInt16()
{
var test = new SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (Avx.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
// Validates basic functionality works, using LoadAligned
test.RunBasicScenario_LoadAligned();
}
else
{
Console.WriteLine("Avx Is Not Supported");
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead(); //TODO: this one does not work. Fix it.
if (Avx.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
// Validates calling via reflection works, using LoadAligned
test.RunReflectionScenario_LoadAligned();
}
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (Avx.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
// Validates passing a local works, using LoadAligned
test.RunLclVarScenario_LoadAligned();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
}
else
{
Console.WriteLine("Test Is Not Supported");
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16
{
private struct DataTable
{
private byte[] inArray0;
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle0;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray0, Int16[] inArray1, Int16[] inArray2, Int32[] outArray, int alignment)
{
int sizeOfinArray0 = inArray0.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
                if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray0 || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
                    throw new ArgumentException("Invalid value of alignment");
}
this.inArray0 = new byte[alignment * 2];
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle0 = GCHandle.Alloc(this.inArray0, GCHandleType.Pinned);
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray0Ptr), ref Unsafe.As<Int32, byte>(ref inArray0[0]), (uint)sizeOfinArray0);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray0Ptr => Align((byte*)(inHandle0.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle0.Free();
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
            private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
            {
                return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld0;
public Vector128<Int16> _fld1;
public Vector128<Int16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16 testClass)
{
var result = AvxVnni.MultiplyWideningAndAddSaturate(_fld0, _fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld0, _fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op0ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data0 = new Int32[Op0ElementCount];
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Vector128<Int32> _clsVar0;
private static Vector128<Int16> _clsVar1;
private static Vector128<Int16> _clsVar2;
private Vector128<Int32> _fld0;
private Vector128<Int16> _fld1;
private Vector128<Int16> _fld2;
private DataTable _dataTable;
static SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16()
{
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
}
public SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16()
{
Succeeded = true;
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data0, _data1, _data2, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AvxVnni.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray0Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
Avx.LoadVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
Avx.LoadAlignedVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAddSaturate), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int16>), typeof(Vector128<Int16>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray0Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAddSaturate), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int16>), typeof(Vector128<Int16>) })
.Invoke(null, new object[] {
Avx.LoadVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned));
var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAddSaturate), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int16>), typeof(Vector128<Int16>) })
.Invoke(null, new object[] {
Avx.LoadAlignedVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
_clsVar0,
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar0, _clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var first = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray0Ptr);
var second = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr);
var third = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray2Ptr);
var result = AvxVnni.MultiplyWideningAndAddSaturate(first, second, third);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(first, second, third, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
            var first = Avx.LoadVector128((Int32*)(_dataTable.inArray0Ptr));
var second = Avx.LoadVector128((Int16*)(_dataTable.inArray1Ptr));
var third = Avx.LoadVector128((Int16*)(_dataTable.inArray2Ptr));
var result = AvxVnni.MultiplyWideningAndAddSaturate(first, second, third);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(first, second, third, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned));
var first = Avx.LoadAlignedVector128((Int32*)(_dataTable.inArray0Ptr));
var second = Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray1Ptr));
var third = Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray2Ptr));
var result = AvxVnni.MultiplyWideningAndAddSaturate(first, second, third);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(first, second, third, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16();
var result = AvxVnni.MultiplyWideningAndAddSaturate(test._fld0, test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AvxVnni.MultiplyWideningAndAddSaturate(_fld0, _fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld0, _fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AvxVnni.MultiplyWideningAndAddSaturate(test._fld0, test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> addend, Vector128<Int16> left, Vector128<Int16> right, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray0 = new Int32[Op0ElementCount];
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), addend);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), left);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), right);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray0, inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* addend, void* left, void* right, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray0 = new Int32[Op0ElementCount];
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), ref Unsafe.AsRef<byte>(addend), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector128<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector128<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray0, inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int32[] addend, Int16[] left, Int16[] right, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
Int32[] outArray = new Int32[RetElementCount];
for (var i = 0; i < RetElementCount; i++)
{
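                // Reference model for MultiplyWideningAndAddSaturate: multiply adjacent Int16 lane pairs,
                // sum the two widened products (addend2), then add to the Int32 addend lane with signed
                // saturation. The sign-bit tests below clamp to int.MaxValue on positive overflow of
                // addend[i] + addend2 and to int.MinValue on negative overflow.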
int addend2 = right[i * 2 + 1] * left[i * 2 + 1] + right[i * 2] * left[i * 2];
int value = addend[i] + addend2;
int tmp = (value & ~(addend2 | addend[i])) < 0 ? int.MaxValue : value;
int c = (~value & (addend2 & addend[i])) < 0 ? int.MinValue : tmp;
outArray[i] = c;
}
for (var i = 0; i < RetElementCount; i++)
{
if (result[i] != outArray[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
                TestLibrary.TestFramework.LogInformation($"{nameof(AvxVnni)}.{nameof(AvxVnni.MultiplyWideningAndAddSaturate)}<Int32>(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" addend: ({string.Join(", ", addend)})");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation($" valid: ({string.Join(", ", outArray)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;
using System.Text.RegularExpressions;
namespace JIT.HardwareIntrinsics.X86
{
public static partial class Program
{
private static void MultiplyWideningAndAddSaturateInt16()
{
var test = new SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (Avx.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
// Validates basic functionality works, using LoadAligned
test.RunBasicScenario_LoadAligned();
}
else
{
Console.WriteLine("Avx Is Not Supported");
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead(); //TODO: this one does not work. Fix it.
if (Avx.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
// Validates calling via reflection works, using LoadAligned
test.RunReflectionScenario_LoadAligned();
}
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (Avx.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
// Validates passing a local works, using LoadAligned
test.RunLclVarScenario_LoadAligned();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
}
else
{
Console.WriteLine("Test Is Not Supported");
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16
{
private struct DataTable
{
private byte[] inArray0;
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle0;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray0, Int16[] inArray1, Int16[] inArray2, Int32[] outArray, int alignment)
{
int sizeOfinArray0 = inArray0.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
                if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray0 || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
                    throw new ArgumentException("Invalid value of alignment");
}
this.inArray0 = new byte[alignment * 2];
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle0 = GCHandle.Alloc(this.inArray0, GCHandleType.Pinned);
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray0Ptr), ref Unsafe.As<Int32, byte>(ref inArray0[0]), (uint)sizeOfinArray0);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray0Ptr => Align((byte*)(inHandle0.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle0.Free();
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
            private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
            {
                return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld0;
public Vector128<Int16> _fld1;
public Vector128<Int16> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16 testClass)
{
var result = AvxVnni.MultiplyWideningAndAddSaturate(_fld0, _fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld0, _fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 32;
private static readonly int Op0ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int16>>() / sizeof(Int16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data0 = new Int32[Op0ElementCount];
private static Int16[] _data1 = new Int16[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Vector128<Int32> _clsVar0;
private static Vector128<Int16> _clsVar1;
private static Vector128<Int16> _clsVar2;
private Vector128<Int32> _fld0;
private Vector128<Int16> _fld1;
private Vector128<Int16> _fld2;
private DataTable _dataTable;
static SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16()
{
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
}
public SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16()
{
Succeeded = true;
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld0), ref Unsafe.As<Int32, byte>(ref _data0[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = (sbyte)TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int16>>());
for (var i = 0; i < Op0ElementCount; i++) { _data0[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
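            // The DataTable copies these arrays into pinned, over-aligned buffers that the pointer-based scenarios read from.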
_dataTable = new DataTable(_data0, _data1, _data2, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AvxVnni.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray0Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
Avx.LoadVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
Avx.LoadAlignedVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAddSaturate), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int16>), typeof(Vector128<Int16>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray0Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int16>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAddSaturate), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int16>), typeof(Vector128<Int16>) })
.Invoke(null, new object[] {
Avx.LoadVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadVector128((Int16*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned));
var result = typeof(AvxVnni).GetMethod(nameof(AvxVnni.MultiplyWideningAndAddSaturate), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int16>), typeof(Vector128<Int16>) })
.Invoke(null, new object[] {
Avx.LoadAlignedVector128((Int32*)(_dataTable.inArray0Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray1Ptr)),
Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray0Ptr, _dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AvxVnni.MultiplyWideningAndAddSaturate(
_clsVar0,
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar0, _clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var first = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray0Ptr);
var second = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray1Ptr);
var third = Unsafe.Read<Vector128<Int16>>(_dataTable.inArray2Ptr);
var result = AvxVnni.MultiplyWideningAndAddSaturate(first, second, third);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(first, second, third, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
            var first = Avx.LoadVector128((Int32*)(_dataTable.inArray0Ptr));
var second = Avx.LoadVector128((Int16*)(_dataTable.inArray1Ptr));
var third = Avx.LoadVector128((Int16*)(_dataTable.inArray2Ptr));
var result = AvxVnni.MultiplyWideningAndAddSaturate(first, second, third);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(first, second, third, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_LoadAligned()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned));
var first = Avx.LoadAlignedVector128((Int32*)(_dataTable.inArray0Ptr));
var second = Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray1Ptr));
var third = Avx.LoadAlignedVector128((Int16*)(_dataTable.inArray2Ptr));
var result = AvxVnni.MultiplyWideningAndAddSaturate(first, second, third);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(first, second, third, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__MultiplyWideningAndAddSaturateInt16();
var result = AvxVnni.MultiplyWideningAndAddSaturate(test._fld0, test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AvxVnni.MultiplyWideningAndAddSaturate(_fld0, _fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld0, _fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AvxVnni.MultiplyWideningAndAddSaturate(test._fld0, test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld0, test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
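            // If the ISA is not supported, the intrinsic call below is expected to throw PlatformNotSupportedException.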
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> addend, Vector128<Int16> left, Vector128<Int16> right, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray0 = new Int32[Op0ElementCount];
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), addend);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), left);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), right);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray0, inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* addend, void* left, void* right, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray0 = new Int32[Op0ElementCount];
Int16[] inArray1 = new Int16[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray0[0]), ref Unsafe.AsRef<byte>(addend), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(left), (uint)Unsafe.SizeOf<Vector128<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(right), (uint)Unsafe.SizeOf<Vector128<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray0, inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int32[] addend, Int16[] left, Int16[] right, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
Int32[] outArray = new Int32[RetElementCount];
for (var i = 0; i < RetElementCount; i++)
{
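                // Widen each adjacent pair of Int16 elements, multiply them pairwise, and sum the pair (addend2),
                // then add it to the corresponding Int32 addend with signed saturation: the sign-bit tests below
                // clamp to int.MaxValue on positive overflow and to int.MinValue on negative overflow.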
int addend2 = right[i * 2 + 1] * left[i * 2 + 1] + right[i * 2] * left[i * 2];
int value = addend[i] + addend2;
int tmp = (value & ~(addend2 | addend[i])) < 0 ? int.MaxValue : value;
int c = (~value & (addend2 & addend[i])) < 0 ? int.MinValue : tmp;
outArray[i] = c;
}
for (var i = 0; i < RetElementCount; i++)
{
if (result[i] != outArray[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
                    TestLibrary.TestFramework.LogInformation($"{nameof(AvxVnni)}.{nameof(AvxVnni.MultiplyWideningAndAddSaturate)}(Vector128<Int32>, Vector128<Int16>, Vector128<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" addend: ({string.Join(", ", addend)})");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation($" valid: ({string.Join(", ", outArray)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/CompilerServices/RuntimeHelpers.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Runtime.CompilerServices
{
internal static class RuntimeHelpers
{
public static int OffsetToStringData
{
get
{
// Number of bytes from the address pointed to by a reference to
// a String to the first 16-bit character in the String.
// This property allows C#'s fixed statement to work on Strings.
return String.FIRST_CHAR_OFFSET;
}
}
[Intrinsic]
public static extern void InitializeArray(Array array, RuntimeFieldHandle fldHandle);
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Runtime.CompilerServices
{
internal static class RuntimeHelpers
{
public static int OffsetToStringData
{
get
{
// Number of bytes from the address pointed to by a reference to
// a String to the first 16-bit character in the String.
// This property allows C#'s fixed statement to work on Strings.
return String.FIRST_CHAR_OFFSET;
}
}
[Intrinsic]
public static extern void InitializeArray(Array array, RuntimeFieldHandle fldHandle);
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Private.Xml/tests/XmlReaderLib/TCReadEndElement.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using OLEDB.Test.ModuleCore;
namespace System.Xml.Tests
{
public partial class TCReadEndElement : TCXMLReaderBaseGeneral
{
// Type is System.Xml.Tests.TCReadEndElement
// Test Case
public override void AddChildren()
{
// for function TestReadEndElement1
{
this.AddChild(new CVariation(TestReadEndElement1) { Attribute = new Variation("ReadEndElement() on EndElement, no namespace") { Pri = 0 } });
}
// for function TestReadEndElement2
{
this.AddChild(new CVariation(TestReadEndElement2) { Attribute = new Variation("ReadEndElement() on EndElement, with namespace") { Pri = 0 } });
}
// for function TestReadEndElement3
{
this.AddChild(new CVariation(TestReadEndElement3) { Attribute = new Variation("ReadEndElement on Start Element, no namespace") });
}
// for function TestReadEndElement4
{
this.AddChild(new CVariation(TestReadEndElement4) { Attribute = new Variation("ReadEndElement on Empty Element, no namespace") { Pri = 0 } });
}
// for function TestReadEndElement5
{
this.AddChild(new CVariation(TestReadEndElement5) { Attribute = new Variation("ReadEndElement on regular Element, with namespace") { Pri = 0 } });
}
// for function TestReadEndElement6
{
this.AddChild(new CVariation(TestReadEndElement6) { Attribute = new Variation("ReadEndElement on Empty Tag, with namespace") { Pri = 0 } });
}
// for function TestReadEndElement7
{
this.AddChild(new CVariation(TestReadEndElement7) { Attribute = new Variation("ReadEndElement on CDATA") });
}
// for function TestReadEndElement9
{
this.AddChild(new CVariation(TestReadEndElement9) { Attribute = new Variation("ReadEndElement on Text") });
}
// for function TestReadEndElement10
{
this.AddChild(new CVariation(TestReadEndElement10) { Attribute = new Variation("ReadEndElement on ProcessingInstruction") });
}
// for function TestReadEndElement11
{
this.AddChild(new CVariation(TestReadEndElement11) { Attribute = new Variation("ReadEndElement on Comment") });
}
// for function TestReadEndElement13
{
this.AddChild(new CVariation(TestReadEndElement13) { Attribute = new Variation("ReadEndElement on XmlDeclaration") });
}
// for function TestTextReadEndElement1
{
this.AddChild(new CVariation(TestTextReadEndElement1) { Attribute = new Variation("ReadEndElement on EntityReference") });
}
// for function TestTextReadEndElement2
{
this.AddChild(new CVariation(TestTextReadEndElement2) { Attribute = new Variation("ReadEndElement on EndEntity") });
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using OLEDB.Test.ModuleCore;
namespace System.Xml.Tests
{
public partial class TCReadEndElement : TCXMLReaderBaseGeneral
{
// Type is System.Xml.Tests.TCReadEndElement
// Test Case
public override void AddChildren()
{
// for function TestReadEndElement1
{
this.AddChild(new CVariation(TestReadEndElement1) { Attribute = new Variation("ReadEndElement() on EndElement, no namespace") { Pri = 0 } });
}
// for function TestReadEndElement2
{
this.AddChild(new CVariation(TestReadEndElement2) { Attribute = new Variation("ReadEndElement() on EndElement, with namespace") { Pri = 0 } });
}
// for function TestReadEndElement3
{
this.AddChild(new CVariation(TestReadEndElement3) { Attribute = new Variation("ReadEndElement on Start Element, no namespace") });
}
// for function TestReadEndElement4
{
this.AddChild(new CVariation(TestReadEndElement4) { Attribute = new Variation("ReadEndElement on Empty Element, no namespace") { Pri = 0 } });
}
// for function TestReadEndElement5
{
this.AddChild(new CVariation(TestReadEndElement5) { Attribute = new Variation("ReadEndElement on regular Element, with namespace") { Pri = 0 } });
}
// for function TestReadEndElement6
{
this.AddChild(new CVariation(TestReadEndElement6) { Attribute = new Variation("ReadEndElement on Empty Tag, with namespace") { Pri = 0 } });
}
// for function TestReadEndElement7
{
this.AddChild(new CVariation(TestReadEndElement7) { Attribute = new Variation("ReadEndElement on CDATA") });
}
// for function TestReadEndElement9
{
this.AddChild(new CVariation(TestReadEndElement9) { Attribute = new Variation("ReadEndElement on Text") });
}
// for function TestReadEndElement10
{
this.AddChild(new CVariation(TestReadEndElement10) { Attribute = new Variation("ReadEndElement on ProcessingInstruction") });
}
// for function TestReadEndElement11
{
this.AddChild(new CVariation(TestReadEndElement11) { Attribute = new Variation("ReadEndElement on Comment") });
}
// for function TestReadEndElement13
{
this.AddChild(new CVariation(TestReadEndElement13) { Attribute = new Variation("ReadEndElement on XmlDeclaration") });
}
// for function TestTextReadEndElement1
{
this.AddChild(new CVariation(TestTextReadEndElement1) { Attribute = new Variation("ReadEndElement on EntityReference") });
}
// for function TestTextReadEndElement2
{
this.AddChild(new CVariation(TestTextReadEndElement2) { Attribute = new Variation("ReadEndElement on EndEntity") });
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest136/Generated136.il | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated136 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct186`1<T0>
extends [mscorlib]System.ValueType
implements class IBase2`2<class BaseClass1,class BaseClass1>, class IBase2`2<!T0,class BaseClass1>
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "MyStruct186::Method7.1501<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<T0,class BaseClass1>.Method7'<M0>() cil managed noinlining {
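      // The .override directive below (a MethodImpl) maps IBase2`2<!T0,class BaseClass1>::Method7 to this explicit implementation.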
.override method instance string class IBase2`2<!T0,class BaseClass1>::Method7<[1]>()
ldstr "MyStruct186::Method7.MI.1503<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod380<M0>() cil managed noinlining {
ldstr "MyStruct186::ClassMethod380.1504<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod381<M0>() cil managed noinlining {
ldstr "MyStruct186::ClassMethod381.1505<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated136 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct186.T<T0,(valuetype MyStruct186`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct186.T<T0,(valuetype MyStruct186`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct186`1<!!T0>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct186`1<!!T0>
callvirt instance string class IBase2`2<!!T0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct186.A<(valuetype MyStruct186`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct186.A<(valuetype MyStruct186`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct186`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct186`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct186`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct186`1<class BaseClass0>::Method7<object>()
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type MyStruct186"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod380<object>()
ldstr "MyStruct186::ClassMethod380.1504<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type MyStruct186"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod381<object>()
ldstr "MyStruct186::ClassMethod381.1505<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type MyStruct186"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct186`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct186`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct186`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct186`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct186`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct186::Method7.MI.1503<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct186`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct186::Method7.1501<System.Object>()#"
call void Generated136::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct186::Method7.1501<System.Object>()#"
call void Generated136::M.IBase2.B.T<class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct186::Method7.1501<System.Object>()#"
call void Generated136::M.IBase2.B.B<valuetype MyStruct186`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!2,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.IBase2.A.T<class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!1,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_3
ldstr "MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.IBase2.A.B<valuetype MyStruct186`1<class BaseClass0>>(!!0,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct186`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct186::Method7.1501<System.Object>()#" +
"MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.MyStruct186.T<class BaseClass0,valuetype MyStruct186`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct186::Method7.1501<System.Object>()#" +
"MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.MyStruct186.A<valuetype MyStruct186`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct186`1<class BaseClass0>
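    // Each test below boxes the struct, resolves the target slot with ldvirtftn, and invokes it indirectly through calli.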
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod380<object>()
calli default string(object)
ldstr "MyStruct186::ClassMethod380.1504<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod381<object>()
calli default string(object)
ldstr "MyStruct186::ClassMethod381.1505<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct186`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct186`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct186::Method7.MI.1503<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated136::MethodCallingTest()
call void Generated136::ConstrainedCallsTest()
call void Generated136::StructConstrainedInterfaceCallsTest()
call void Generated136::CalliTest()
ldc.i4 100
ret
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated136 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct186`1<T0>
extends [mscorlib]System.ValueType
implements class IBase2`2<class BaseClass1,class BaseClass1>, class IBase2`2<!T0,class BaseClass1>
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "MyStruct186::Method7.1501<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<T0,class BaseClass1>.Method7'<M0>() cil managed noinlining {
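      // The .override directive below (a MethodImpl) maps IBase2`2<!T0,class BaseClass1>::Method7 to this explicit implementation.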
.override method instance string class IBase2`2<!T0,class BaseClass1>::Method7<[1]>()
ldstr "MyStruct186::Method7.MI.1503<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod380<M0>() cil managed noinlining {
ldstr "MyStruct186::ClassMethod380.1504<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod381<M0>() cil managed noinlining {
ldstr "MyStruct186::ClassMethod381.1505<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated136 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct186.T<T0,(valuetype MyStruct186`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct186.T<T0,(valuetype MyStruct186`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct186`1<!!T0>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct186`1<!!T0>
callvirt instance string class IBase2`2<!!T0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct186.A<(valuetype MyStruct186`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct186.A<(valuetype MyStruct186`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct186`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct186`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct186`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct186`1<class BaseClass0>::Method7<object>()
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type MyStruct186"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod380<object>()
ldstr "MyStruct186::ClassMethod380.1504<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type MyStruct186"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod381<object>()
ldstr "MyStruct186::ClassMethod381.1505<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type MyStruct186"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct186`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct186`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct186`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct186`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct186`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct186::Method7.MI.1503<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct186`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct186::Method7.1501<System.Object>()#"
call void Generated136::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct186::Method7.1501<System.Object>()#"
call void Generated136::M.IBase2.B.T<class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct186::Method7.1501<System.Object>()#"
call void Generated136::M.IBase2.B.B<valuetype MyStruct186`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!2,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.IBase2.A.T<class BaseClass1,valuetype MyStruct186`1<class BaseClass0>>(!!1,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_3
ldstr "MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.IBase2.A.B<valuetype MyStruct186`1<class BaseClass0>>(!!0,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct186`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct186::Method7.1501<System.Object>()#" +
"MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.MyStruct186.T<class BaseClass0,valuetype MyStruct186`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct186::Method7.1501<System.Object>()#" +
"MyStruct186::Method7.MI.1503<System.Object>()#"
call void Generated136::M.MyStruct186.A<valuetype MyStruct186`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct186`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct186`1<class BaseClass0>
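    // Each test below boxes the struct, resolves the target slot with ldvirtftn, and invokes it indirectly through calli.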
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod380<object>()
calli default string(object)
ldstr "MyStruct186::ClassMethod380.1504<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::ClassMethod381<object>()
calli default string(object)
ldstr "MyStruct186::ClassMethod381.1505<System.Object>()"
ldstr "valuetype MyStruct186`1<class BaseClass0> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct186`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct186`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct186`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct186::Method7.1501<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct186`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct186::Method7.MI.1503<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct186`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated136::MethodCallingTest()
call void Generated136::ConstrainedCallsTest()
call void Generated136::StructConstrainedInterfaceCallsTest()
call void Generated136::CalliTest()
ldc.i4 100
ret
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/mono/mono/tests/delegate-prop.cs | using System;
using System.Reflection;
using System.Globalization;
using System.Diagnostics;
/* Regression test for https://github.com/mono/mono/issues/7944 */
public class MyClass
{
public string Prop1 { get; set; }
public string Prop2 { get; set; }
public void DoRepro()
{
var prop1Setter = this.GetType ().GetProperty (nameof (Prop1)).GetSetMethod ();
var prop2Setter = this.GetType ().GetProperty (nameof (Prop2)).GetSetMethod ();
var prop1Delegate = (Action <MyClass, string>) prop1Setter.CreateDelegate(typeof (Action <MyClass, string>));
var prop2Delegate = (Action <MyClass, string>) prop2Setter.CreateDelegate(typeof (Action <MyClass, string>));
prop1Delegate (this, "prop1Value");
prop2Delegate (this, "prop2Value");
// Console.WriteLine ($"prop1: {Prop1}");
// Console.WriteLine ($"prop2: {Prop2}");
}
public static int Main (string []args) {
var o = new MyClass ();
o.DoRepro ();
if (o.Prop1 != "prop1Value")
return 1;
if (o.Prop2 != "prop2Value")
return 2;
return 0;
}
}
| using System;
using System.Reflection;
using System.Globalization;
using System.Diagnostics;
/* Regression test for https://github.com/mono/mono/issues/7944 */
public class MyClass
{
public string Prop1 { get; set; }
public string Prop2 { get; set; }
public void DoRepro()
{
var prop1Setter = this.GetType ().GetProperty (nameof (Prop1)).GetSetMethod ();
var prop2Setter = this.GetType ().GetProperty (nameof (Prop2)).GetSetMethod ();
var prop1Delegate = (Action <MyClass, string>) prop1Setter.CreateDelegate(typeof (Action <MyClass, string>));
var prop2Delegate = (Action <MyClass, string>) prop2Setter.CreateDelegate(typeof (Action <MyClass, string>));
prop1Delegate (this, "prop1Value");
prop2Delegate (this, "prop2Value");
// Console.WriteLine ($"prop1: {Prop1}");
// Console.WriteLine ($"prop2: {Prop2}");
}
public static int Main (string []args) {
var o = new MyClass ();
o.DoRepro ();
if (o.Prop1 != "prop1Value")
return 1;
if (o.Prop2 != "prop2Value")
return 2;
return 0;
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/Methodical/cctor/xassem/xprecise4_cs_d.csproj | <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
<RequiresProcessIsolation>true</RequiresProcessIsolation>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="xprecise4.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="testlib.csproj" />
</ItemGroup>
</Project>
| <Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
<RequiresProcessIsolation>true</RequiresProcessIsolation>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>False</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="xprecise4.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="testlib.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftLeftLogical.Vector128.UInt16.1.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ShiftLeftLogical_Vector128_UInt16_1()
{
var test = new ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1
{
private struct DataTable
{
private byte[] inArray;
private byte[] outArray;
private GCHandle inHandle;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt16[] inArray, UInt16[] outArray, int alignment)
{
int sizeOfinArray = inArray.Length * Unsafe.SizeOf<UInt16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<UInt16, byte>(ref inArray[0]), (uint)sizeOfinArray);
}
public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<UInt16> _fld;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld), ref Unsafe.As<UInt16, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
return testStruct;
}
public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1 testClass)
{
var result = AdvSimd.ShiftLeftLogical(_fld, 1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1 testClass)
{
fixed (Vector128<UInt16>* pFld = &_fld)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pFld)),
1
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16);
private static readonly byte Imm = 1;
private static UInt16[] _data = new UInt16[Op1ElementCount];
private static Vector128<UInt16> _clsVar;
private Vector128<UInt16> _fld;
private DataTable _dataTable;
static ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1()
{
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar), ref Unsafe.As<UInt16, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
}
public ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld), ref Unsafe.As<UInt16, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
_dataTable = new DataTable(_data, new UInt16[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ShiftLeftLogical(
Unsafe.Read<Vector128<UInt16>>(_dataTable.inArrayPtr),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(_dataTable.inArrayPtr)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogical), new Type[] { typeof(Vector128<UInt16>), typeof(byte) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<UInt16>>(_dataTable.inArrayPtr),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogical), new Type[] { typeof(Vector128<UInt16>), typeof(byte) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((UInt16*)(_dataTable.inArrayPtr)),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ShiftLeftLogical(
_clsVar,
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<UInt16>* pClsVar = &_clsVar)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pClsVar)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var firstOp = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArrayPtr);
var result = AdvSimd.ShiftLeftLogical(firstOp, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var firstOp = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArrayPtr));
var result = AdvSimd.ShiftLeftLogical(firstOp, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1();
var result = AdvSimd.ShiftLeftLogical(test._fld, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1();
fixed (Vector128<UInt16>* pFld = &test._fld)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pFld)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ShiftLeftLogical(_fld, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<UInt16>* pFld = &_fld)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pFld)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLeftLogical(test._fld, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(&test._fld)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<UInt16> firstOp, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray = new UInt16[Op1ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray[0]), firstOp);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray = new UInt16[Op1ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult(UInt16[] firstOp, UInt16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ShiftLeftLogical(firstOp[i], Imm) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLeftLogical)}<UInt16>(Vector128<UInt16>, 1): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void ShiftLeftLogical_Vector128_UInt16_1()
{
var test = new ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1
{
private struct DataTable
{
private byte[] inArray;
private byte[] outArray;
private GCHandle inHandle;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt16[] inArray, UInt16[] outArray, int alignment)
{
int sizeOfinArray = inArray.Length * Unsafe.SizeOf<UInt16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt16>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<UInt16, byte>(ref inArray[0]), (uint)sizeOfinArray);
}
public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<UInt16> _fld;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref testStruct._fld), ref Unsafe.As<UInt16, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
return testStruct;
}
public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1 testClass)
{
var result = AdvSimd.ShiftLeftLogical(_fld, 1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1 testClass)
{
fixed (Vector128<UInt16>* pFld = &_fld)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pFld)),
1
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt16>>() / sizeof(UInt16);
private static readonly byte Imm = 1;
private static UInt16[] _data = new UInt16[Op1ElementCount];
private static Vector128<UInt16> _clsVar;
private Vector128<UInt16> _fld;
private DataTable _dataTable;
static ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1()
{
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _clsVar), ref Unsafe.As<UInt16, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
}
public ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt16>, byte>(ref _fld), ref Unsafe.As<UInt16, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetUInt16(); }
_dataTable = new DataTable(_data, new UInt16[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.ShiftLeftLogical(
Unsafe.Read<Vector128<UInt16>>(_dataTable.inArrayPtr),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(_dataTable.inArrayPtr)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogical), new Type[] { typeof(Vector128<UInt16>), typeof(byte) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<UInt16>>(_dataTable.inArrayPtr),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogical), new Type[] { typeof(Vector128<UInt16>), typeof(byte) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((UInt16*)(_dataTable.inArrayPtr)),
(byte)1
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt16>)(result));
ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.ShiftLeftLogical(
_clsVar,
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<UInt16>* pClsVar = &_clsVar)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pClsVar)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var firstOp = Unsafe.Read<Vector128<UInt16>>(_dataTable.inArrayPtr);
var result = AdvSimd.ShiftLeftLogical(firstOp, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var firstOp = AdvSimd.LoadVector128((UInt16*)(_dataTable.inArrayPtr));
var result = AdvSimd.ShiftLeftLogical(firstOp, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(firstOp, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1();
var result = AdvSimd.ShiftLeftLogical(test._fld, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new ImmUnaryOpTest__ShiftLeftLogical_Vector128_UInt16_1();
fixed (Vector128<UInt16>* pFld = &test._fld)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pFld)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.ShiftLeftLogical(_fld, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<UInt16>* pFld = &_fld)
{
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(pFld)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLeftLogical(test._fld, 1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.ShiftLeftLogical(
AdvSimd.LoadVector128((UInt16*)(&test._fld)),
1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<UInt16> firstOp, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray = new UInt16[Op1ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray[0]), firstOp);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "")
{
UInt16[] inArray = new UInt16[Op1ElementCount];
UInt16[] outArray = new UInt16[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt16>>());
ValidateResult(inArray, outArray, method);
}
private void ValidateResult(UInt16[] firstOp, UInt16[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.ShiftLeftLogical(firstOp[i], Imm) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLeftLogical)}<UInt16>(Vector128<UInt16>, 1): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/native/libs/System.Native/pal_random.c | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#if defined(__APPLE__) && __APPLE__
#include <CommonCrypto/CommonRandom.h>
#endif
#include "pal_config.h"
#include "pal_random.h"
/*
Generate random bytes. The generated bytes are not cryptographically strong.
*/
void SystemNative_GetNonCryptographicallySecureRandomBytes(uint8_t* buffer, int32_t bufferLength)
{
assert(buffer != NULL);
#if HAVE_ARC4RANDOM_BUF
arc4random_buf(buffer, (size_t)bufferLength);
#else
long num = 0;
static bool sInitializedMRand;
// Fall back to the secure version
SystemNative_GetCryptographicallySecureRandomBytes(buffer, bufferLength);
if (!sInitializedMRand)
{
srand48((long int)time(NULL));
sInitializedMRand = true;
}
// always xor srand48 over the whole buffer to get some randomness
// in case /dev/urandom is not really random
for (int i = 0; i < bufferLength; i++)
{
if (i % 4 == 0)
{
num = lrand48();
}
*(buffer + i) ^= num;
num >>= 8;
}
#endif // HAVE_ARC4RANDOM_BUF
}
/*
Generate cryptographically strong random bytes.
Return 0 on success, -1 on failure.
*/
int32_t SystemNative_GetCryptographicallySecureRandomBytes(uint8_t* buffer, int32_t bufferLength)
{
assert(buffer != NULL);
#ifdef __EMSCRIPTEN__
extern int32_t dotnet_browser_entropy(uint8_t* buffer, int32_t bufferLength);
static bool sMissingBrowserCrypto;
if (!sMissingBrowserCrypto)
{
int32_t bff = dotnet_browser_entropy(buffer, bufferLength);
if (bff == -1)
sMissingBrowserCrypto = true;
else
return 0;
}
#elif defined(__APPLE__) && __APPLE__
CCRNGStatus status = CCRandomGenerateBytes(buffer, bufferLength);
if (status == kCCSuccess)
{
return 0;
}
else
{
return -1;
}
#else
static volatile int rand_des = -1;
static bool sMissingDevURandom;
if (!sMissingDevURandom)
{
if (rand_des == -1)
{
int fd;
do
{
#if HAVE_O_CLOEXEC
fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
#else
fd = open("/dev/urandom", O_RDONLY);
fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif
}
while ((fd == -1) && (errno == EINTR));
if (fd != -1)
{
int expected = -1;
if (!__atomic_compare_exchange_n(&rand_des, &expected, fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
{
// Another thread has already set the rand_des
close(fd);
}
}
else if (errno == ENOENT)
{
sMissingDevURandom = true;
}
}
if (rand_des != -1)
{
int32_t offset = 0;
do
{
ssize_t n = read(rand_des, buffer + offset , (size_t)(bufferLength - offset));
if (n == -1)
{
if (errno == EINTR)
{
continue;
}
return -1;
}
offset += n;
}
while (offset != bufferLength);
return 0;
}
}
#endif
return -1;
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <assert.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#if defined(__APPLE__) && __APPLE__
#include <CommonCrypto/CommonRandom.h>
#endif
#include "pal_config.h"
#include "pal_random.h"
/*
Generate random bytes. The generated bytes are not cryptographically strong.
*/
void SystemNative_GetNonCryptographicallySecureRandomBytes(uint8_t* buffer, int32_t bufferLength)
{
assert(buffer != NULL);
#if HAVE_ARC4RANDOM_BUF
arc4random_buf(buffer, (size_t)bufferLength);
#else
long num = 0;
static bool sInitializedMRand;
// Fall back to the secure version
SystemNative_GetCryptographicallySecureRandomBytes(buffer, bufferLength);
if (!sInitializedMRand)
{
srand48((long int)time(NULL));
sInitializedMRand = true;
}
// always xor srand48 over the whole buffer to get some randomness
// in case /dev/urandom is not really random
for (int i = 0; i < bufferLength; i++)
{
if (i % 4 == 0)
{
num = lrand48();
}
*(buffer + i) ^= num;
num >>= 8;
}
#endif // HAVE_ARC4RANDOM_BUF
}
/*
Generate cryptographically strong random bytes.
Return 0 on success, -1 on failure.
*/
int32_t SystemNative_GetCryptographicallySecureRandomBytes(uint8_t* buffer, int32_t bufferLength)
{
assert(buffer != NULL);
#ifdef __EMSCRIPTEN__
extern int32_t dotnet_browser_entropy(uint8_t* buffer, int32_t bufferLength);
static bool sMissingBrowserCrypto;
if (!sMissingBrowserCrypto)
{
int32_t bff = dotnet_browser_entropy(buffer, bufferLength);
if (bff == -1)
sMissingBrowserCrypto = true;
else
return 0;
}
#elif defined(__APPLE__) && __APPLE__
CCRNGStatus status = CCRandomGenerateBytes(buffer, bufferLength);
if (status == kCCSuccess)
{
return 0;
}
else
{
return -1;
}
#else
static volatile int rand_des = -1;
static bool sMissingDevURandom;
if (!sMissingDevURandom)
{
if (rand_des == -1)
{
int fd;
do
{
#if HAVE_O_CLOEXEC
fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
#else
fd = open("/dev/urandom", O_RDONLY);
fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif
}
while ((fd == -1) && (errno == EINTR));
if (fd != -1)
{
int expected = -1;
if (!__atomic_compare_exchange_n(&rand_des, &expected, fd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
{
// Another thread has already set the rand_des
close(fd);
}
}
else if (errno == ENOENT)
{
sMissingDevURandom = true;
}
}
if (rand_des != -1)
{
int32_t offset = 0;
do
{
ssize_t n = read(rand_des, buffer + offset , (size_t)(bufferLength - offset));
if (n == -1)
{
if (errno == EINTR)
{
continue;
}
return -1;
}
offset += n;
}
while (offset != bufferLength);
return 0;
}
}
#endif
return -1;
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/libraries/System.Security.Permissions/src/System/Net/NetworkInformation/NetworkInformationPermission.cs | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security;
using System.Security.Permissions;
namespace System.Net.NetworkInformation
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
public sealed class NetworkInformationPermission : CodeAccessPermission, IUnrestrictedPermission
{
public NetworkInformationPermission(PermissionState state) { }
public NetworkInformationPermission(NetworkInformationAccess access) { }
public NetworkInformationAccess Access { get; }
public void AddPermission(NetworkInformationAccess access) { }
public bool IsUnrestricted() => true;
public override IPermission Copy() => this;
public override IPermission Union(IPermission target) => null;
public override IPermission Intersect(IPermission target) => null;
public override bool IsSubsetOf(IPermission target) => false;
public override void FromXml(SecurityElement securityElement) { }
public override SecurityElement ToXml() => null;
}
}
| // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Security;
using System.Security.Permissions;
namespace System.Net.NetworkInformation
{
#if NETCOREAPP
[Obsolete(Obsoletions.CodeAccessSecurityMessage, DiagnosticId = Obsoletions.CodeAccessSecurityDiagId, UrlFormat = Obsoletions.SharedUrlFormat)]
#endif
public sealed class NetworkInformationPermission : CodeAccessPermission, IUnrestrictedPermission
{
public NetworkInformationPermission(PermissionState state) { }
public NetworkInformationPermission(NetworkInformationAccess access) { }
public NetworkInformationAccess Access { get; }
public void AddPermission(NetworkInformationAccess access) { }
public bool IsUnrestricted() => true;
public override IPermission Copy() => this;
public override IPermission Union(IPermission target) => null;
public override IPermission Intersect(IPermission target) => null;
public override bool IsSubsetOf(IPermission target) => false;
public override void FromXml(SecurityElement securityElement) { }
public override SecurityElement ToXml() => null;
}
}
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/installer/tests/Directory.Build.props | <Project>
<Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.props, $(MSBuildThisFileDirectory)..))" />
<PropertyGroup>
<TestDir>$(InstallerProjectRoot)tests\</TestDir>
<TestAssetsDir>$(TestDir)Assets\</TestAssetsDir>
<TestStabilizedLegacyPackagesDir>$(ArtifactsObjDir)TestStabilizedPackages\</TestStabilizedLegacyPackagesDir>
<TestRestorePackagesPath>$(ArtifactsObjDir)TestPackageCache\</TestRestorePackagesPath>
<TestRestoreNuGetConfigFile>$(ArtifactsObjDir)TestNuGetConfig\NuGet.config</TestRestoreNuGetConfigFile>
<InternalNupkgCacheDir>$(ArtifactsObjDir)ExtraNupkgsForTestRestore\</InternalNupkgCacheDir>
<TestArchitectures>$(TargetArchitecture)</TestArchitectures>
<TestInfraTargetFramework>$(NetCoreAppToolCurrent)</TestInfraTargetFramework>
<TestRunnerAdditionalArguments>-notrait category=failing</TestRunnerAdditionalArguments>
<RunAnalyzers>false</RunAnalyzers>
</PropertyGroup>
</Project>
| <Project>
<Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.props, $(MSBuildThisFileDirectory)..))" />
<PropertyGroup>
<TestDir>$(InstallerProjectRoot)tests\</TestDir>
<TestAssetsDir>$(TestDir)Assets\</TestAssetsDir>
<TestStabilizedLegacyPackagesDir>$(ArtifactsObjDir)TestStabilizedPackages\</TestStabilizedLegacyPackagesDir>
<TestRestorePackagesPath>$(ArtifactsObjDir)TestPackageCache\</TestRestorePackagesPath>
<TestRestoreNuGetConfigFile>$(ArtifactsObjDir)TestNuGetConfig\NuGet.config</TestRestoreNuGetConfigFile>
<InternalNupkgCacheDir>$(ArtifactsObjDir)ExtraNupkgsForTestRestore\</InternalNupkgCacheDir>
<TestArchitectures>$(TargetArchitecture)</TestArchitectures>
<TestInfraTargetFramework>$(NetCoreAppToolCurrent)</TestInfraTargetFramework>
<TestRunnerAdditionalArguments>-notrait category=failing</TestRunnerAdditionalArguments>
<RunAnalyzers>false</RunAnalyzers>
</PropertyGroup>
</Project>
| -1 |
dotnet/runtime | 66,250 | use the name of the header when throwing exception for invalid chars | Fixes #65136 | pedrobsaila | 2022-03-05T20:18:11Z | 2022-03-08T16:15:18Z | 8fe2d2b0bbb7bec841c97372528531c071b29c16 | fa5dcda23c681031b5d2bd68482702baef80b836 | use the name of the header when throwing exception for invalid chars. Fixes #65136 | ./src/mono/mono/tests/exception10.cs | using System;
public class Test {
public static int Main (string[] args) {
int c = 0;
try
{
throw new Exception("Test exception");
}
catch (Exception e)
{
Console.WriteLine("Exception: {0}", e.Message);
}
finally
{
Console.WriteLine("Finally... {0}", c++);
}
if (c != 1)
return 1;
return 0;
}
}
| using System;
public class Test {
public static int Main (string[] args) {
int c = 0;
try
{
throw new Exception("Test exception");
}
catch (Exception e)
{
Console.WriteLine("Exception: {0}", e.Message);
}
finally
{
Console.WriteLine("Finally... {0}", c++);
}
if (c != 1)
return 1;
return 0;
}
}
| -1 |
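Several of the rows above belong to dotnet/runtime PR 66,250, "use the name of the header when throwing exception for invalid chars" (Fixes #65136). As a rough, hedged illustration of that intent only, and not the actual System.Net.Http.Headers implementation, a value check that surfaces the offending header's name in its exception message might look like the hypothetical C# sketch below; every type and method name in it is invented for illustration.

// Hypothetical sketch (C#): include the header's name in the exception thrown
// when its value contains characters that are never valid in an HTTP header value.
using System;

public static class HeaderValueCheckDemo
{
    public static void EnsureNoNewLines(string headerName, string headerValue)
    {
        // '\r' and '\n' are rejected here; the header name is included in the
        // message so the caller can tell which header carried the bad value.
        if (headerValue.IndexOfAny(new[] { '\r', '\n' }) >= 0)
        {
            throw new FormatException(
                $"The value of header '{headerName}' contains invalid new-line characters.");
        }
    }

    public static int Main()
    {
        EnsureNoNewLines("X-Custom", "ok-value"); // passes silently
        try
        {
            EnsureNoNewLines("X-Custom", "bad\r\nvalue"); // throws
        }
        catch (FormatException e)
        {
            Console.WriteLine("Exception: {0}", e.Message); // message names 'X-Custom'
            return 100;
        }
        return 1;
    }
}

Returning 100 on the expected path simply mirrors the success code used by the generated IL test above; the exact exception type and wording in the real library may differ.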